text
string
size
int64
token_count
int64
#!/usr/bin/python3
"""add.py -- register certificate files from a directory in a SQLite database."""
from sys import argv
import datetime
import sqlite3
import pathlib

PATH = pathlib.Path.cwd()

HELP_TEXT = '''
    Usage: add.py [-h] directory

        -h, --help      bring up this help message
        directory       directory with certs to add
'''


def add_certs(cert_dir: str) -> None:
    """Add new certs to database. Initialize database if none exists.

    The database file is named ``<cert_dir>.db``. Every ``*.txt`` file found
    directly inside *cert_dir* is inserted once; files whose name is already
    present (primary-key collision) are reported as skipped.
    """
    d_b = cert_dir + '.db'
    con = sqlite3.connect(d_b)
    cursor_obj = con.cursor()
    # CREATE TABLE IF NOT EXISTS replaces the old "does the .db file exist"
    # check.  The original code created the table on a separate connection
    # that was never committed or closed, so the CREATE could be rolled back
    # before the insert connection ever saw it.
    cursor_obj.execute(
        'CREATE TABLE IF NOT EXISTS certs('
        'id text PRIMARY KEY, date_added text, applied integer, '
        'date_applied text, banned integer, banned_date text, '
        'required_activation integer, currently_used integer)')
    con.commit()

    added_certs = []
    skipped_certs = []
    add_path = PATH / cert_dir
    try:
        for cert_file in add_path.iterdir():
            # Check that file in directory is indeed a cert file and set values
            if cert_file.is_file() and cert_file.suffix == '.txt':  # TODO find file sig
                cert_name = cert_file.name
                # isoformat(sep=' ') produces the same text the deprecated
                # implicit datetime adapter used to store, without relying
                # on it (the adapter is deprecated since Python 3.12).
                added = datetime.datetime.now().isoformat(sep=' ')
                entities = (cert_name, added, 0, 0, 0, 0)
                # Try to add UNIQUE cert file to DATABASE
                try:
                    cursor_obj.execute(
                        'INSERT INTO certs('
                        'id, date_added, applied, banned, '
                        'required_activation, currently_used) '
                        'VALUES(?, ?, ?, ?, ?, ?)', entities)
                    con.commit()
                    added_certs.append(cert_name)
                # If cert file is already in DATABASE then skip
                except sqlite3.IntegrityError:
                    skipped_certs.append(cert_name)
    finally:
        # Always release the database handle, even if iterdir() raises.
        con.close()

    # Print output
    if skipped_certs:
        print('\n[*] Already in DATABASE, skipping:\n')
        for _x in skipped_certs:
            print('\t' + _x)
    if added_certs:
        print('\n\n[*] Added to the DATABASE:\n')
        for _x in added_certs:
            print('\t' + _x)
    print(f'\n\n[*] Added: {len(added_certs)}')
    print(f'[*] Skipped {len(skipped_certs)}\n')


if __name__ == '__main__':
    # Help flag or missing argument: show usage and stop.
    # (raise SystemExit instead of quit(): quit() is injected by the site
    # module and is not guaranteed to exist in every interpreter.)
    if len(argv) < 2 or argv[1] in ('--help', '-h'):
        print(HELP_TEXT)
        raise SystemExit()
    # Check if directory name is valid, run stuff if so
    if (PATH / argv[1]).is_dir():
        CERT_DIR = argv[1].rstrip('/')  # tolerate trailing slash(es)
        try:
            add_certs(CERT_DIR)
        except KeyboardInterrupt:
            raise SystemExit()
    else:
        print(f'\n[*] {argv[1]} not a valid directory\n')
2,776
902
# -*- coding: utf-8 -*-
"""
Created with IntelliJ IDEA.
User: jinhuichen
Date: 3/28/2018 4:17 PM
Description: Thin entry point that launches the MRQ dashboard application.
"""
from mrq.dashboard.app import main

if __name__ == '__main__':
    main()
209
93
#!/usr/bin/env python3
"""Reverse the characters of every word in a line of text."""


def reverse_words(s):
    """Reverse each space-separated word, keeping word order and spacing."""
    return ' '.join(word[::-1] for word in s.split(' '))


def reverse_words_ext(s):
    """Like reverse_words, but preserves any kind of whitespace (tabs, etc.).

    Each maximal run of non-whitespace characters is reversed in place;
    every whitespace character is emitted unchanged.
    """
    pieces = []
    current = []
    for ch in s:
        if ch.isspace():
            if current:
                pieces.append(''.join(reversed(current)))
                current = []
            pieces.append(ch)
        else:
            current.append(ch)
    if current:
        pieces.append(''.join(reversed(current)))
    return ''.join(pieces)


if __name__ == '__main__':
    line = input()
    for transform in (reverse_words, reverse_words_ext):
        print(transform(line))
556
197
import requests


def ok(event, context):
    """Lambda-style handler: GET the internal ``ok`` service, return its body.

    Args:
        event: invocation event (unused).
        context: invocation context (unused).

    Returns:
        str: the response body from http://ok:8080/.
    """
    url = "http://ok:8080/"
    # Use requests.get with an explicit timeout: requests has NO default
    # timeout, so without one a hung service would block this handler forever.
    response = requests.get(url, timeout=10)
    return response.text
140
47
""" Python Curve Generator @Guilherme Trevisan - github.com/TrevisanGMW/gt-tools - 2020-01-02 1.1 - 2020-01-03 Minor patch adjustments to the script 1.2 - 2020-06-07 Fixed random window widthHeight issue. Updated naming convention to make it clearer. (PEP8) Added length checker for selection before running. 1.3 - 2020-06-17 Changed UI Added help menu Added icon 1.4 - 2020-06-27 No longer failing to generate curves with non-unique names Tweaked the color and text for the title and help menu 1.5 - 2021-01-26 Fixed way the curve is generated to account for closed and opened curves 1.6 - 2021-05-12 Made script compatible with Python 3 (Maya 2022+) """ import maya.cmds as cmds import sys from decimal import * from maya import OpenMayaUI as omui try: from shiboken2 import wrapInstance except ImportError: from shiboken import wrapInstance try: from PySide2.QtGui import QIcon from PySide2.QtWidgets import QWidget except ImportError: from PySide.QtGui import QIcon, QWidget # Script Name script_name = "GT - Generate Python Curve" # Version: script_version = "1.6" #Python Version python_version = sys.version_info.major # Default Settings close_curve = False add_import = False # Function for the "Run Code" button def run_output_code(out): try: exec(out) except Exception as e: cmds.warning("Something is wrong with your code!") cmds.warning(e) # Main Form ============================================================================ def build_gui_py_curve(): window_name = "build_gui_py_curve" if cmds.window(window_name, exists =True): cmds.deleteUI(window_name) # Main GUI Start Here ================================================================================= build_gui_py_curve = cmds.window(window_name, title=script_name + ' (v' + script_version + ')',\ titleBar=True, mnb=False, mxb=False, sizeable =True) cmds.window(window_name, e=True, s=True, wh=[1,1]) content_main = cmds.columnLayout(adj = True) # Title title_bgc_color = (.4, .4, .4) cmds.separator(h=10, style='none') # 
Empty Space cmds.rowColumnLayout(nc=1, cw=[(1, 270)], cs=[(1, 10)], p=content_main) # Window Size Adjustment cmds.rowColumnLayout(nc=3, cw=[(1, 10), (2, 200), (3, 50)], cs=[(1, 10), (2, 0), (3, 0)], p=content_main) # Title Column cmds.text(" ", bgc=title_bgc_color) # Tiny Empty Green Space cmds.text(script_name, bgc=title_bgc_color, fn="boldLabelFont", align="left") cmds.button( l ="Help", bgc=title_bgc_color, c=lambda x:build_gui_help_py_curve()) cmds.separator(h=10, style='none', p=content_main) # Empty Space # Body ==================== body_column = cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main) cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)]) settings = cmds.checkBoxGrp(columnWidth2=[150, 1], numberOfCheckBoxes=2, \ label1 = 'Add import \"maya.cmds\" ', label2 = "Force Open", v1 = add_import, v2 = close_curve) cmds.rowColumnLayout(nc=1, cw=[(1, 230)], cs=[(1,0)]) cmds.separator(h=10, style='none') # Empty Space cmds.button(l ="Generate", bgc=(.6, .6, .6), c=lambda x:generate_python_curve()) cmds.separator(h=10, style='none', p=content_main) # Empty Space cmds.separator(h=10, p=content_main) # Bottom ==================== cmds.rowColumnLayout(nc=1, cw=[(1, 260)], cs=[(1,10)], p=content_main) cmds.text(label='Output Python Curve' ) output_python = cmds.scrollField(editable=True, wordWrap=True) cmds.separator(h=10, style='none') # Empty Space cmds.button(l ="Run Code", c=lambda x:run_output_code(cmds.scrollField(output_python, query=True, text=True))) cmds.separator(h=10, style='none') # Empty Space def generate_python_curve(): not_curve_error = "Please make sure you selected a Nurbs Curve or a Bezier Curve object before generating it" if len(cmds.ls(selection=True)) != 0: getcontext().prec = 5 sel_one = cmds.ls(sl=1)[0] shape = cmds.listRelatives(sel_one, s=1 , fullPath=True)[0] type_checker = str(cmds.objectType(shape)) if "nurbsCurve" in type_checker or "bezierCurve" in type_checker: opened_curve = cmds.checkBoxGrp (settings, 
q=True, value2=True) per_state = cmds.getAttr(shape + '.form') knots_string = '' extra_cvs_per = '' is_periodic = False if not opened_curve and per_state == 2: is_periodic=True curve_info = cmds.arclen(sel_one, ch=True) curve_knots = cmds.getAttr( curve_info + '.knots[*]' ) knots_string = ', per=True, k=' + str(curve_knots) cmds.delete(curve_info) cvs = cmds.getAttr(shape+'.cv[*]') cvs_list = [] for c in cvs: cvs_list.append([float(Decimal("%.3f" % c[0])),float(Decimal("%.3f" % c[1])),float(Decimal("%.3f" % c[2]))]) if is_periodic and len(cvs) > 2: extra_cvs_per = ', ' for i in range(3): if i != 2: extra_cvs_per += str(cvs_list[i]) + ', ' else: extra_cvs_per += str(cvs_list[i]) if cmds.checkBoxGrp(settings, q=True, value1=True): out = 'import maya.cmds as cmds\n\ncmds.curve(p=' else: out = 'cmds.curve(p=' out += '[%s' % ', '.join(map(str, cvs_list)) out += extra_cvs_per + '], d='+str(cmds.getAttr(shape+'.degree'))+ knots_string + ')' print ("#" * 100) print (out) print ("#" * 100) cmds.scrollField(output_python, edit=True, wordWrap=True, text=out ,sl=True) cmds.setFocus(output_python) else: cmds.warning(not_curve_error) cmds.scrollField(output_python, edit=True, wordWrap=True, text=not_curve_error ,sl=True) cmds.setFocus(output_python) else: cmds.warning(not_curve_error) # Show and Lock Window cmds.showWindow(build_gui_py_curve) cmds.window(window_name, e=True, s=False) # Set Window Icon qw = omui.MQtUtil.findWindow(window_name) if python_version == 3: widget = wrapInstance(int(qw), QWidget) else: widget = wrapInstance(long(qw), QWidget) icon = QIcon(':/pythonFamily.png') widget.setWindowIcon(icon) # Main GUI Ends Here ================================================================================= # Creates Help GUI def build_gui_help_py_curve(): window_name = "build_gui_help_py_curve" if cmds.window(window_name, exists=True): cmds.deleteUI(window_name, window=True) cmds.window(window_name, title= script_name + " Help", mnb=False, mxb=False, s=True) 
cmds.window(window_name, e=True, s=True, wh=[1,1]) cmds.columnLayout("main_column", p= window_name) # Title Text cmds.separator(h=12, style='none') # Empty Space cmds.rowColumnLayout(nc=1, cw=[(1, 310)], cs=[(1, 10)], p="main_column") # Window Size Adjustment cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1, 10)], p="main_column") # Title Column cmds.text(script_name + " Help", bgc=[.4,.4,.4], fn="boldLabelFont", align="center") cmds.separator(h=10, style='none', p="main_column") # Empty Space # Body ==================== cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column") cmds.text(l='This script generates the Python code necessary to create', align="left") cmds.text(l='a selected curve.', align="left") cmds.separator(h=10, style='none') # Empty Space cmds.text(l='Make sure you delete the curve\'s history before ', align="left") cmds.text(l='generating the code.', align="left") cmds.separator(h=15, style='none') # Empty Space cmds.text(l='Add import "maya.cmds":', align="left", fn="boldLabelFont") cmds.text(l='Adds a line that imports Maya\'s API. 
This is necessary', align="left") cmds.text(l='when running python scripts.', align="left") cmds.separator(h=15, style='none') # Empty Space cmds.text(l='Force Open: ', align="left", fn="boldLabelFont") cmds.text(l='Doens\'t check if the curve is periodic leaving it open.', align="left") cmds.separator(h=15, style='none') # Empty Space cmds.text(l='"Generate" button:', align="left", fn="boldLabelFont") cmds.text(l='Outputs the python code necessary to create the curve', align="left") cmds.text(l='inside the "Output Python Curve" box.', align="left") cmds.separator(h=15, style='none') # Empty Space cmds.text(l='Run Code:', align="left", fn="boldLabelFont") cmds.text(l='Attempts to run the code (or anything written) inside ', align="left") cmds.text(l='"Output Python Curve" box', align="left") cmds.separator(h=15, style='none') # Empty Space cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column") cmds.text('Guilherme Trevisan ') cmds.text(l='<a href="mailto:trevisangmw@gmail.com">TrevisanGMW@gmail.com</a>', hl=True, highlightColor=[1,1,1]) cmds.rowColumnLayout(nc=2, cw=[(1, 140),(2, 140)], cs=[(1,10),(2, 0)], p="main_column") cmds.separator(h=15, style='none') # Empty Space cmds.text(l='<a href="https://github.com/TrevisanGMW">Github</a>', hl=True, highlightColor=[1,1,1]) cmds.separator(h=7, style='none') # Empty Space # Close Button cmds.rowColumnLayout(nc=1, cw=[(1, 300)], cs=[(1,10)], p="main_column") cmds.separator(h=10, style='none') cmds.button(l='OK', h=30, c=lambda args: close_help_gui()) cmds.separator(h=8, style='none') # Show and Lock Window cmds.showWindow(window_name) cmds.window(window_name, e=True, s=False) # Set Window Icon qw = omui.MQtUtil.findWindow(window_name) if python_version == 3: widget = wrapInstance(int(qw), QWidget) else: widget = wrapInstance(long(qw), QWidget) icon = QIcon(':/question.png') widget.setWindowIcon(icon) def close_help_gui(): if cmds.window(window_name, exists=True): cmds.deleteUI(window_name, 
window=True) #Build UI if __name__ == '__main__': build_gui_py_curve()
11,166
4,109
import pygame
from config import Config
from core.ui import Table, Button
from core.scene import Scene
from core.manager import SceneManager
from core.scene.preload import Preload


class SummaryScene(Scene):
    """End-of-run scene: shows the pilot's score and wave over a dimmed screen,
    with a Continue button that returns to the preload scene."""

    def __init__(self, game):
        super().__init__(game)

        # Dim the current frame so the summary table stands out.
        self._background = pygame.display.get_surface()
        self._background.set_alpha(180)
        self._background.fill(Config.colors['black'])
        self._elements.clear(self._canvas, self._background)

        board_w, board_h = 200, 200
        pilot = self._parent._pilot
        rows = [('Score', 'Wave'), (str(pilot.score), str(pilot.wave))]
        self._scoreBoard = Table(self, board_w, board_h, rows, [100, 100],
                                 title='Summary', line=False, button=False)
        self._scoreBoard.rect.centerx = Config.windowWidth // 2
        self._scoreBoard.rect.centery = Config.windowHeight // 2
        self.addElement(self._scoreBoard)

        def on_continue():
            # Persist pilot progress, then hand control back via preload.
            self._parent._pilot.update()
            SceneManager.ret(Preload(self._parent))

        self._btn = Button(self, 'Continue', on_continue)
        self._btn.rect.right = self._scoreBoard.rect.right
        self._btn.rect.top = self._scoreBoard.rect.bottom
        self.addElement(self._btn)
        self.addEventListener(self._btn.handleEvent)

    def loadData(self):
        pass

    def run(self):
        for event in pygame.event.get():
            self._handleEvent(event)
        self.update()
        self.draw()
        self._clock.tick(Config.ticks)

    def update(self):
        super().update()

    def draw(self):
        # Only repaint the rectangles the sprite group reports as dirty.
        pygame.display.update(self._elements.draw(self._canvas))
1,766
534
from __future__ import absolute_import from __future__ import print_function from __future__ import division from mwptoolkit.module.Encoder import graph_based_encoder,rnn_encoder,transformer_encoder
198
59
# Copyright 2010 Alon Zakai ('kripken'). All rights reserved.
# This file is part of Syntensity/the Intensity Engine, an open source project. See COPYING.txt for licensing.

from intensity.signals import client_connect, client_disconnect
from intensity.base import quit


class Data:
    # Live client count, shared at class level by both signal handlers.
    counter = 0


def add(sender, **kwargs):
    """client_connect handler: one more client online."""
    Data.counter += 1


def subtract(sender, **kwargs):
    """client_disconnect handler: shut down once nobody is left."""
    Data.counter -= 1
    if Data.counter <= 0:
        quit()


# weak=False keeps the handlers referenced for the signal's lifetime.
client_connect.connect(add, weak=False)
client_disconnect.connect(subtract, weak=False)
546
178
#!/usr/bin/python
# -*- coding: utf-8 -*-

__author__ = "Ricardo Ribeiro"
__credits__ = ["Ricardo Ribeiro"]
__license__ = "MIT"
__version__ = "0.0"
__maintainer__ = "Ricardo Ribeiro"
__email__ = "ricardojvr@gmail.com"
__status__ = "Development"


import time
from datetime import datetime, timedelta


def timeit(method):
    """Decorator that prints how long each call to *method* took.

    Output looks like ``name: days:hours:minutes:seconds;microseconds``;
    the wrapped function's return value is passed through unchanged.
    """
    def timed(*args, **kw):
        started = time.time()
        result = method(*args, **kw)
        # Adding the elapsed seconds to a reference datetime is a cheap way
        # to split the duration into day/hour/minute/second fields.
        elapsed = datetime(1, 1, 1) + timedelta(seconds=time.time() - started)
        print("%s: %d:%d:%d:%d;%d" % (method.__name__,
                                      elapsed.day - 1,
                                      elapsed.hour,
                                      elapsed.minute,
                                      elapsed.second,
                                      elapsed.microsecond))
        return result

    return timed
694
281
""" Created by vcokltfre at 2020-07-08 """ import json import logging import time from datetime import datetime import discord from discord.ext import commands from discord.ext.commands import has_any_role class BotInfo(commands.Cog): def __init__(self, bot): self.bot = bot self.logger = logging.getLogger("salbot.cogs.botinfo") self.uptime_start = round(time.time()) self.socket_stats = {} self.opcodes = { 10: "HELLO", 11: "HEARTBEAT", 9: "HI", 7: "RECONNECT" } @commands.Cog.listener() async def on_socket_response(self, data): t = data["t"] if not t: try: t = self.opcodes[data["op"]] except KeyError: self.logger.warning(f"Unknown opcode. Received: {data['op']}") self.socket_stats[t] = self.socket_stats.get(t, 0) + 1 @commands.command(name="stats") @has_any_role("Administrator", "Moderator") async def stats_bot(self, ctx, typ="raw"): if typ == "raw": jsd = json.dumps(self.socket_stats, indent=4) desc = f"```json\n{jsd}```" embed = discord.Embed(title="Raw Socket Stats", color=0xFF0000, description=desc, timestamp=datetime.now()) await ctx.channel.send(embed=embed) def setup(bot): bot.add_cog(BotInfo(bot))
1,388
466
import inspect
import logging
from collections import OrderedDict
from functools import wraps
from typing import TYPE_CHECKING, Any, Callable, Optional, Type, Union, cast, overload

from django.core.paginator import InvalidPage, Page, Paginator
from django.db.models import QuerySet
from django.http import HttpRequest
from ninja import Schema
from ninja.constants import NOT_SET
from ninja.pagination import LimitOffsetPagination, PageNumberPagination, PaginationBase
from ninja.signature import has_kwargs
from ninja.types import DictStrAny
from pydantic import Field

from ninja_extra.conf import settings
from ninja_extra.exceptions import NotFound
from ninja_extra.schemas import PaginatedResponseSchema
from ninja_extra.urls import remove_query_param, replace_query_param

logger = logging.getLogger()

if TYPE_CHECKING:
    from .controllers import ControllerBase  # pragma: no cover

__all__ = [
    "PageNumberPagination",
    "PageNumberPaginationExtra",
    "PaginationBase",
    "LimitOffsetPagination",
    "paginate",
    "PaginatedResponseSchema",
]


def _positive_int(
    integer_string: Union[str, int], strict: bool = False, cutoff: Optional[int] = None
) -> int:
    """
    Cast a string to a strictly positive integer.

    :param integer_string: value to coerce.
    :param strict: when True, zero is rejected as well.
    :param cutoff: optional upper bound the result is clamped to.
    :raises ValueError: if the value is negative (or zero when strict).
    """
    ret = int(integer_string)
    if ret < 0 or (ret == 0 and strict):
        raise ValueError()
    if cutoff:
        return min(ret, cutoff)
    return ret


class PageNumberPaginationExtra(PaginationBase):
    """Page-number pagination producing a DRF-style
    ``count``/``next``/``previous``/``results`` payload."""

    class Input(Schema):
        # Query parameters accepted by paginated endpoints.
        page: int = Field(1, gt=0)
        page_size: int = Field(100, lt=200)

    page_query_param = "page"
    page_size_query_param = "page_size"
    max_page_size = 200

    paginator_class = Paginator

    def __init__(
        self,
        page_size: int = settings.PAGINATION_PER_PAGE,
        max_page_size: Optional[int] = None,
    ) -> None:
        super().__init__()
        self.page_size = page_size
        self.max_page_size = max_page_size or 200
        # Rebuild the Input schema so its defaults/limits match this instance.
        self.Input = self.create_input()  # type:ignore

    def create_input(self) -> Type[Input]:
        """Return an Input schema bound to this instance's page-size limits."""
        class DynamicInput(PageNumberPaginationExtra.Input):
            page: int = Field(1, gt=0)
            page_size: int = Field(self.page_size, lt=self.max_page_size)

        return DynamicInput

    def paginate_queryset(
        self, items: QuerySet, request: HttpRequest, **params: Any
    ) -> Any:
        """Slice *items* to the requested page.

        :raises NotFound: when the requested page number is invalid.
        """
        pagination_input = cast(PageNumberPaginationExtra.Input, params["pagination"])
        page_size = self.get_page_size(pagination_input.page_size)
        current_page_number = pagination_input.page
        paginator = self.paginator_class(items, page_size)
        try:
            url = request.build_absolute_uri()
            page: Page = paginator.page(current_page_number)
            return self.get_paginated_response(base_url=url, page=page)
        except InvalidPage as exc:
            msg = "Invalid page. {page_number} {message}".format(
                page_number=current_page_number, message=str(exc)
            )
            # Chain the original InvalidPage so tracebacks keep the root cause.
            raise NotFound(msg) from exc

    def get_paginated_response(self, *, base_url: str, page: Page) -> DictStrAny:
        """Build the count/next/previous/results payload for *page*."""
        return OrderedDict(
            [
                ("count", page.paginator.count),
                ("next", self.get_next_link(base_url, page=page)),
                ("previous", self.get_previous_link(base_url, page=page)),
                ("results", list(page)),
            ]
        )

    @classmethod
    def get_response_schema(
        cls, response_schema: Union[Schema, Type[Schema], Any]
    ) -> Any:
        """Wrap *response_schema* in the generic paginated-response schema."""
        return PaginatedResponseSchema[response_schema]

    def get_next_link(self, url: str, page: Page) -> Optional[str]:
        """URL of the next page, or None when *page* is the last one."""
        if not page.has_next():
            return None
        page_number = page.next_page_number()
        return replace_query_param(url, self.page_query_param, page_number)

    def get_previous_link(self, url: str, page: Page) -> Optional[str]:
        """URL of the previous page, or None when *page* is the first one."""
        if not page.has_previous():
            return None
        page_number = page.previous_page_number()
        if page_number == 1:
            # Page 1 is the default; drop the query parameter entirely.
            return remove_query_param(url, self.page_query_param)
        return replace_query_param(url, self.page_query_param, page_number)

    def get_page_size(self, page_size: int) -> int:
        """Clamp the client-requested page size, falling back to the default."""
        if page_size:
            try:
                return _positive_int(page_size, strict=True, cutoff=self.max_page_size)
            except ValueError:
                # _positive_int only raises ValueError (the original handler
                # also caught an unreachable KeyError); fall back to default.
                pass
        return self.page_size


@overload
def paginate() -> Callable[..., Any]:
    ...


@overload
def paginate(
    func_or_pgn_class: Any = NOT_SET, **paginator_params: Any
) -> Callable[..., Any]:
    ...
def paginate(
    func_or_pgn_class: Any = NOT_SET, **paginator_params: Any
) -> Callable[..., Any]:
    """Decorate a controller route so its queryset result is paginated.

    Supports three call styles:
      * ``@paginate`` directly on the view function,
      * ``@paginate()`` using the default pagination class, or
      * ``@paginate(MyPagination, **params)`` with an explicit class.
    """
    isfunction = inspect.isfunction(func_or_pgn_class)
    # NOT_SET is a sentinel object: compare by identity, not equality.
    isnotset = func_or_pgn_class is NOT_SET

    pagination_class: Type[PaginationBase] = settings.PAGINATION_CLASS

    if isfunction:
        # Bare @paginate usage: decorate immediately with the default class.
        return _inject_pagination(func_or_pgn_class, pagination_class)

    if not isnotset:
        pagination_class = func_or_pgn_class

    def wrapper(func: Callable[..., Any]) -> Any:
        return _inject_pagination(func, pagination_class, **paginator_params)

    return wrapper


def _inject_pagination(
    func: Callable[..., Any],
    paginator_class: Type[PaginationBase],
    **paginator_params: Any,
) -> Callable[..., Any]:
    """Wrap *func* so pagination input is parsed and applied to its result."""
    # Record once whether the view accepts **kwargs; the pagination kwarg can
    # only be forwarded to the view when it does.
    func.has_kwargs = has_kwargs(func)  # type: ignore
    if not func.has_kwargs:  # type: ignore
        logger.debug(
            f"function {func.__name__} should have **kwargs if you want to use pagination parameters"
        )

    paginator: PaginationBase = paginator_class(**paginator_params)
    paginator_kwargs_name = "pagination"

    @wraps(func)
    def view_with_pagination(
        controller: "ControllerBase", *args: Any, **kw: Any
    ) -> Any:
        func_kwargs = dict(kw)
        if not func.has_kwargs:  # type: ignore
            # The view cannot accept the injected pagination kwarg; strip it.
            func_kwargs.pop(paginator_kwargs_name)
        items = func(controller, *args, **func_kwargs)
        assert (
            controller.context and controller.context.request
        ), "Request object is None"
        return paginator.paginate_queryset(items, controller.context.request, **kw)

    # Tell django-ninja about the extra query-parameter schema to inject.
    view_with_pagination._ninja_contribute_args = [  # type: ignore
        (
            paginator_kwargs_name,
            paginator.Input,
            paginator.InputSource,
        ),
    ]
    return view_with_pagination
6,486
2,029
# !/usr/bin/python3
"""Simple Tkinter sign-up form: name/email/password fields plus a submit button."""
from tkinter import *

top = Tk()
top.geometry("400x250")

# BUG FIX: `place()` returns None, so the original chained calls such as
# `name = Label(...).place(...)` bound every variable to None, leaving no
# usable reference to any widget.  Create each widget first, keep the
# reference, then place it.
name = Label(top, text="Name")
name.place(x=30, y=50)
email = Label(top, text="Email")
email.place(x=30, y=90)
password = Label(top, text="Password")
password.place(x=30, y=130)

sbmitbtn = Button(top, text="Submit",
                  activebackground="pink", activeforeground="blue")
sbmitbtn.place(x=30, y=170)

e1 = Entry(top)
e1.place(x=80, y=50)
e2 = Entry(top)
e2.place(x=80, y=90)
e3 = Entry(top, show="*")  # mask the password input
e3.place(x=95, y=130)

top.mainloop()
547
257
# -*- coding: utf-8 -*-
# Generated by Django 1.9.6 on 2016-06-06 22:16
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):
    """Replace the free-text ``name`` field on Site and Zone with a ``slug``."""

    dependencies = [
        ('yafa', '0001_initial'),
    ]

    operations = [
        # Drop the old name columns first...
        migrations.RemoveField(
            model_name='site',
            name='name',
        ),
        migrations.RemoveField(
            model_name='zone',
            name='name',
        ),
        # ...then add non-null slug columns; the '' default only backfills
        # existing rows (preserve_default=False keeps it out of the schema).
        migrations.AddField(
            model_name='site',
            name='slug',
            field=models.SlugField(default='', max_length=250),
            preserve_default=False,
        ),
        migrations.AddField(
            model_name='zone',
            name='slug',
            field=models.SlugField(default='', max_length=250),
            preserve_default=False,
        ),
    ]
873
270
from django.urls import path

from toys.views import (
    toy_aggregate_view,
    toy_detail_view,
    toy_list_view,
    toy_raw_sql_view,
    toy_sql_view,
)

app_name = "toys"

# Routes for the toys app; the literal "toys/count/" route is listed before
# the "<int:pk>" detail route (the int converter never matches "count").
urlpatterns = [
    path("toys/", toy_list_view, name="toys_list"),
    path("toys_sql/", toy_sql_view, name="toys_sql_list"),
    path("toys/count/", toy_aggregate_view, name="toys_count"),
    path("toys_raw/", toy_raw_sql_view, name="toys_raw_list"),
    path("toys/<int:pk>/", toy_detail_view, name="toy_detail"),
]
501
204
'''This script goes along the blog post
"Building powerful image classification models using very little data"
from blog.keras.io.
It uses data that can be downloaded at:
https://www.kaggle.com/c/dogs-vs-cats/data
In our setup, we:
- created a data/ folder
- created train/ and validation/ subfolders inside data/
- created cats/ and dogs/ subfolders inside train/ and validation/
- put the cat pictures index 0-999 in data/train/cats
- put the cat pictures index 1000-1400 in data/validation/cats
- put the dogs pictures index 12500-13499 in data/train/dogs
- put the dog pictures index 13500-13900 in data/validation/dogs
So that we have 1000 training examples for each class, and 400 validation
examples for each class.
In summary, this is our directory structure:
```
data/
    train/
        dogs/
            dog001.jpg
            dog002.jpg
            ...
        cats/
            cat001.jpg
            cat002.jpg
            ...
    validation/
        dogs/
            dog001.jpg
            dog002.jpg
            ...
        cats/
            cat001.jpg
            cat002.jpg
            ...
```
'''
import os
import h5py
import numpy as np
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing import image as image_utils
from keras import optimizers
from keras.models import Sequential
from keras.layers import Convolution2D, MaxPooling2D, ZeroPadding2D
from keras.layers import Activation, Dropout, Flatten, Dense

# image input utils -- support both the Python 2 and Python 3 module names
try:
    from Tkinter import Tk
    from tkFileDialog import askopenfilename
except ImportError:  # Python 3 renamed these modules
    from tkinter import Tk
    from tkinter.filedialog import askopenfilename

# path to the model weights files.
weights_path = 'data/models/vgg16_weights.h5'
top_model_weights_path = 'data/models/bottleneck_fc_model.h5'
# dimensions of our images.
img_width, img_height = 150, 150

train_data_dir = 'data/train'
validation_data_dir = 'data/validation'
nb_train_samples = 2000
nb_validation_samples = 800
nb_epoch = 50

# build the VGG16 network: five convolutional blocks described as
# (filter count, number of conv layers) -- identical layers and names to the
# original 30 hand-written model.add(...) lines.
VGG16_BLOCKS = [(64, 2), (128, 2), (256, 3), (512, 3), (512, 3)]

model = Sequential()
first_layer = True
for block_idx, (nb_filters, nb_convs) in enumerate(VGG16_BLOCKS, start=1):
    for conv_idx in range(1, nb_convs + 1):
        if first_layer:
            # only the very first layer declares the input shape (channels first)
            model.add(ZeroPadding2D((1, 1), input_shape=(3, img_width, img_height)))
            first_layer = False
        else:
            model.add(ZeroPadding2D((1, 1)))
        model.add(Convolution2D(nb_filters, 3, 3, activation='relu',
                                name='conv%d_%d' % (block_idx, conv_idx)))
    model.add(MaxPooling2D((2, 2), strides=(2, 2)))

# load the weights of the VGG16 networks
# (trained on ImageNet, won the ILSVRC competition in 2014)
# note: when there is a complete match between your model definition
# and your weight savefile, you can simply call model.load_weights(filename)
assert os.path.exists(weights_path), 'Model weights not found (see "weights_path" variable in script).'
f = h5py.File(weights_path, 'r')  # explicit read mode (h5py's default changed across versions)
for k in range(f.attrs['nb_layers']):
    if k >= len(model.layers):
        # we don't look at the last (fully-connected) layers in the savefile
        break
    g = f['layer_{}'.format(k)]
    weights = [g['param_{}'.format(p)] for p in range(g.attrs['nb_params'])]
    model.layers[k].set_weights(weights)
f.close()
print('Model loaded.')

# build a classifier model to put on top of the convolutional model
top_model = Sequential()
top_model.add(Flatten(input_shape=model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
# Should have 0 dropout for prediction. But still need model structure so set to 0.
top_model.add(Dropout(0.0))
top_model.add(Dense(1, activation='sigmoid'))

print('[INFO] loading weights. May take a while...')
# note that it is necessary to start with a fully-trained
# classifier, including the top classifier,
# in order to successfully do fine-tuning
top_model.load_weights(top_model_weights_path)

# add the model on top of the convolutional base
model.add(top_model)

# TODO: create test_data in appropriate format.
print("[INFO] loading and preprocessing image...")
Tk().withdraw()  # we don't want a full GUI, so keep the root window from appearing
filename = askopenfilename()  # show an "Open" dialog box and return the path to the selected file
image = image_utils.load_img(filename, target_size=(img_width, img_height))
image = image_utils.img_to_array(image)  # array should be (3,150,150)
image = np.expand_dims(image, axis=0)  # expand to shape (1,3,150,150)

pDOG = model.predict(image)[0][0]
pCAT = 1. - pDOG
# print() call instead of the Python 2 `print` statement used originally,
# so the script parses under Python 3 and stays consistent with the other
# print() calls in this file.
print('Image {} percent dog and {} percent cat'.format(pDOG * 100., pCAT * 100.))
5,590
2,069
""" TODO: Once I finish the d zero and high paper, I will port the code here. TODO: also put the epochs training, for the ml vs maml paper with synthetic data. """
163
50
from setuptools import setup
from os import path

# Use the README as the long description shown on PyPI.
curr_dir = path.abspath(path.dirname(__file__))
with open(path.join(curr_dir, "README.md"), encoding="utf-8") as readme:
    long_description = readme.read()

setup(
    name="kedro-light",
    version="0.1",
    description="A lightweight interface to Kedro",
    long_description=long_description,
    long_description_content_type="text/markdown",
    # NOTE(review): URL points at 'naive-bayes-explainer' -- confirm this is
    # the intended repository for kedro-light.
    url="https://github.com/ellwise/naive-bayes-explainer",
    author="Elliott Wise",
    author_email="ell.wise@gmail.com",
    license="MIT",
    packages=["kedro_light"],
    install_requires=["kedro"],
    include_package_data=True,
    zip_safe=False,
)
692
235
"""Small Flask quiz game: serves random clues from data.txt and checks answers.

Each line of data.txt is expected to be '<clue>|<answer>' (pipe-separated).
"""
import random
from flask import Flask, request, render_template, jsonify

app = Flask(__name__)

# Load all clue|answer lines once at startup; routes index into this list.
data_list = []
with open('data.txt', 'r') as data_file:
    data_list = data_file.readlines()

@app.route("/", methods=['GET'])
def index():
    """Render the game page with a random clue and its list index."""
    # NOTE(review): randint starts at 1, so line 0 of data.txt is never
    # chosen — presumably a header line; confirm against the data file.
    index = random.randint(1, len(data_list) - 1)
    clue = data_list[index].split('|')[0]
    return render_template('game.html', clue=clue.strip(), index=index)

@app.route("/check")
def checkAnswer():
    """Compare the submitted answer (case-insensitive) against the stored one."""
    ind = int(request.args.get("index"))
    # answers are compared upper-cased and stripped of whitespace
    ans = request.args.get("answer").strip().upper()
    correct_answer = data_list[ind].split('|')[1].strip()
    return "You got it right!" if (ans == correct_answer) else "Wrong Answer! Please try again!!"

@app.route("/show")
def showAnswer():
    """Reveal the answer for the clue at the given index."""
    ind = int(request.args.get("index"))
    return data_list[ind].split('|')[1].strip()

@app.route("/new")
def newClue():
    """Return a fresh random clue (and its index) as JSON for AJAX refresh."""
    index = random.randint(1, len(data_list) - 1)
    clue = data_list[index].split('|')[0].strip()
    response = {
        'index': index,
        'clue': clue
    }
    return jsonify(response)

if __name__ == "__main__":
    app.run(host='0.0.0.0')
1,217
430
# -*- coding: utf-8 -*-
"""Plotting.py for notebook 05_Preliminary_comparison_of_simulations_AGN_fraction_with_data

This python file contains all the functions used for plotting graphs and maps in the 2nd notebook (.ipynb) of the repository: 05. Preliminary comparison of the 𝑓MM between simulation and data

Script written by: Soumya Shreeram
Project supervised by Johan Comparat
Date created: 27th April 2021
"""
# astropy modules
import astropy.units as u
import astropy.io.fits as fits

from astropy.table import Table, Column
from astropy.coordinates import SkyCoord
from astropy.cosmology import FlatLambdaCDM, z_at_value

import numpy as np

# scipy modules
from scipy.spatial import KDTree
from scipy.interpolate import interp1d

import os
import importlib

# plotting imports
import matplotlib
from mpl_toolkits import axes_grid1
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d.axes3d import Axes3D
from matplotlib.ticker import LinearLocator, FormatStrFormatter
from matplotlib import cm
from matplotlib.collections import PatchCollection
from matplotlib.patches import Rectangle
import seaborn as sns

# project-local analysis modules
import Agn_incidence_from_Major_Mergers as aimm
import Comparison_simulation_with_literature_data as cswl

from scipy.stats import norm


def setLabel(ax, xlabel, ylabel, title='', xlim='default', ylim='default', legend=True):
    """
    Function defining plot properties
    @param ax :: axes to be held
    @param xlabel, ylabel :: labels of the x-y axis
    @param title :: title of the plot
    @param xlim, ylim :: x-y limits for the axis
    @param legend :: if True, draw a legend and enlarge its markers
    """
    ax.set_xlabel(xlabel)
    ax.set_ylabel(ylabel)
    # 'default' sentinel means: leave matplotlib's autoscaled limits alone
    if xlim != 'default':
        ax.set_xlim(xlim)
    if ylim != 'default':
        ax.set_ylim(ylim)
    if legend:
        l = ax.legend(loc='best', fontsize=14, frameon=False)
        # NOTE(review): legendHandles/_legmarker are private/deprecated in
        # newer matplotlib — confirm the pinned matplotlib version supports this.
        for legend_handle in l.legendHandles:
            legend_handle._legmarker.set_markersize(12)
    ax.grid(False)
    ax.set_title(title, fontsize=18)
    return


def plotFpairs(ax, r_p, f_pairs, f_pairs_err, label, color='r', errorbar = True):
    """Plot the pair fraction vs. separation (converted to kpc) on *ax*.

    @param r_p :: projected radius bins [Mpc]; the first bin edge is dropped
    @param f_pairs, f_pairs_err :: values and errors to plot
    Returns the axes.
    """
    # changing all unit to kpc
    r_p_kpc, f_pairs = 1e3*r_p[1:], f_pairs
    # plotting the results
    ax.plot( r_p_kpc , f_pairs, 's', ls='--', color=color, label = label)
    if errorbar:
        # NOTE(review): assumes f_pairs is an astropy Quantity (uses .value)
        # when errorbars are drawn — confirm with callers.
        ax.errorbar(r_p_kpc , f_pairs.value, yerr=np.array(f_pairs_err), ecolor='k', fmt='none', capsize=4.5)
    return ax


def plotScaleMMdistribution(halo_m_scale_arr_all_r, cosmo, dt_m_arr):
    """
    Function plots the number of objects in pairs as a function of the scale of last MM
    --> the cuts on delta t_mm are overplotted to see the selection criterion
    """
    fig, ax = plt.subplots(1,1,figsize=(7,6))
    bins = 20
    hist_all_r = np.zeros((0, bins))
    # histogram the last-major-merger scale factors for every radius bin
    for i in range(len(halo_m_scale_arr_all_r)):
        hist_counts, a = np.histogram(halo_m_scale_arr_all_r[i], bins=bins)
        hist_all_r = np.append(hist_all_r, [hist_counts], axis=0)
        ax.plot(a[1:], hist_counts, '--', marker = 'd', color='k')

    # convert the t_MM cuts into scale factors and overplot them as vlines
    scale_mm = cswl.tmmToScale(cosmo, dt_m_arr)
    pal1 = sns.color_palette("Spectral", len(scale_mm)+1).as_hex()
    for j, l in enumerate(scale_mm):
        ax.vlines(l, np.min(hist_all_r), np.max(hist_all_r), colors=pal1[j], label=r'$t_{\rm MM}$ = %.1f Gyr'%dt_m_arr[j])

    setLabel(ax, r'Scale factor, $a$', r'Counts', '', 'default',[np.min(hist_all_r), np.max(hist_all_r)], legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', frameon=False)
    ax.set_yscale('log')
    return


def plotNpSep(ax, hd_z_halo, pairs_all, color, label, mec, errorbars = True):
    """
    Function plots the n_p as a function of separation
    Returns (ax, n_pairs, n_pairs_err).
    """
    pairs_all = np.array(pairs_all)

    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()

    # get number density of pairs with and without selection cuts
    n_pairs, n_pairs_err = cswl.nPairsToFracPairs(hd_z_halo, pairs_all)

    # changing all unit to kpc
    r_p_kpc, n_pairs = 1e3*r_p[1:len(n_pairs)+1], n_pairs

    # plotting the results
    ax.plot( r_p_kpc , n_pairs, 'd', mec = mec, ms = 10, color=color, label=label)

    # errorbars
    if errorbars:
        n_pairs_err = np.array(n_pairs_err)
        ax.errorbar(r_p_kpc , np.array(n_pairs), yerr=n_pairs_err, ecolor=mec, fmt='none', capsize=4.5)
    return ax, n_pairs, n_pairs_err


def plotFracNdensityPairs(hd_z_halo, pairs_all, pairs_mm_dv_all, pairs_selected_all, plot_selected_pairs=True):
    """
    Function to plot the fractional number density of pairs for different selection criteria
    """
    flare = sns.color_palette("pastel", 5).as_hex()
    mec = ['k', '#05ad2c', '#db5807', '#a30a26', 'b']
    fig, ax = plt.subplots(1,1,figsize=(5,4))

    # plotting the 4 cases with the 4 different cuts
    ax, n_pairs, n_pairs_err = plotNpSep(ax, hd_z_halo, pairs_all[1], 'k', r' $\mathbf{\Gamma}_{m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off}}(r)\ $', mec[0])
    ax, n_mm_dv_pairs, n_pairs_mm_dv_err = plotNpSep(ax, hd_z_halo, pairs_mm_dv_all[1], flare[3], r'$\mathbf{\Gamma}_{t_{\rm MM};\ \tilde{X}_{\rm off}}(r|\ m;\ \Delta v)$', mec[3])

    # NOTE(review): if plot_selected_pairs is False, n_selected_pairs and
    # n_selected_err are never assigned but are used below — this raises
    # NameError. Confirm the False path is ever exercised.
    if plot_selected_pairs:
        ax, n_selected_pairs, n_selected_err = plotNpSep(ax, hd_z_halo, pairs_selected_all[1], flare[2], r'$\mathbf{\Gamma}(r|\ m;\ \Delta v;\ t_{\rm MM};\ \tilde{X}_{\rm off} )$'+'\n'+r'$t_{\rm MM} \in [0.6-1.2]$ Gyr, $\tilde{X}_{\rm off} \in [0.17, 0.54]$', mec[1])

    ax.set_yscale("log")
    setLabel(ax, r'Separation, $r$ [kpc]', r'$\mathbf{\Gamma}(r)$ [Mpc$^{-3}$]', '', 'default', 'default', legend=False)
    ax.legend(bbox_to_anchor=(1.05, 1), loc='upper left', fontsize=15, frameon=False)

    pairs_arr = np.array([n_pairs, n_mm_dv_pairs, n_selected_pairs], dtype=object)
    pairs_arr_err = np.array([n_pairs_err, n_pairs_mm_dv_err, n_selected_err], dtype=object)
    return pairs_arr, pairs_arr_err, ax


def plotCumulativeDist(vol, dt_m_arr, pairs_mm_all, pairs_mm_dv_all, n_pairs_mm_dt_all, n_pairs_mm_dv_dt_all, param = 't_mm'):
    """
    Function to plot the cumulative number of pairs for the total vol (<z=2) for pairs with dz and mass ratio criteria
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, _ = aimm.shellVolume()
    fig, ax = plt.subplots(1,2,figsize=(17,6))
    pal = sns.color_palette("coolwarm", len(dt_m_arr)+1).as_hex()

    # baseline curves (no t_MM / Xoff criterion); factor 2*vol converts
    # pair counts to a number density per unit volume (each pair counted twice)
    ax[0].plot( (1e3*r_p[1:]), (pairs_mm_all[1][1:]/(2*vol)), 'X', color='k', label='No criterion')
    ax[1].plot( (1e3*r_p[1:]), (pairs_mm_dv_all[1][1:]/(2*vol)), 'X', color='k', label='No criterion')

    # one curve per t_MM (or Xoff) bin
    for t_idx in range(len(dt_m_arr)):
        np_mm_dt, np_mm_dv_dt = n_pairs_mm_dt_all[t_idx], n_pairs_mm_dv_dt_all[t_idx]
        if param == 't_mm':
            label = r'$t_{\rm MM} \in$ %.1f-%.1f Gyr'%(dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        else:
            label = r'$\tilde{X}_{\rm off} \in$ %.1f-%.1f Gyr'%(dt_m_arr[t_idx][0], dt_m_arr[t_idx][1])
        ax[0].plot( (1e3*r_p[1:]), (np_mm_dt[1:]/(2*vol)), 'kX', label = label, color=pal[t_idx])
        ax[1].plot( (1e3*r_p[1:]), (np_mm_dv_dt[1:]/(2*vol)), 'kX', color=pal[t_idx])

    ax[0].set_yscale('log')
    ax[1].set_yscale('log')
    setLabel(ax[0], r'Separation, $r$ [kpc]', 'Cumulative number of halo pairs\n'+r'[Mpc$^{-3}$]', r'Mass ratio 3:1, $\Delta z_{\rm R, S} < 10^{-3}$', 'default', 'default', legend=False)
    setLabel(ax[1], r'Separation, $r$ [kpc]', r'', 'Mass ratio 3:1', 'default', 'default', legend=False)
    ax[0].legend(bbox_to_anchor=(-0.5, -0.7), loc='lower left', ncol=4, frameon=False)
    return pal


def plotParameterDistributions(xoff_all, string=r'$\tilde{X}_{\rm off}$', xmax=5, filestring='xoff'):
    """
    Function to plot the parameter distribution i.e. SF and PDF
    Saves the figure to ../figures/<filestring>_function.png and returns ax.
    """
    fig, ax = plt.subplots(1,1,figsize=(7,6))
    sf_xoff = norm.sf(np.sort(xoff_all))

    # for Xoff plot the survival function; otherwise plot the CDF
    if string == r'$\tilde{X}_{\rm off}$':
        ax.plot(np.sort(xoff_all), sf_xoff, 'r-', label=r'Survival Function of '+string)
        xmax = np.max(xoff_all)
    else:
        ax.plot(np.sort(xoff_all), 1-sf_xoff, 'r-', label=r'CDF of '+string)

    pdf_xoff = norm.pdf(np.sort(xoff_all))
    ax.plot(np.sort(xoff_all), pdf_xoff, 'k-', label=r'PDF of '+string)

    setLabel(ax, string, 'Distribution of '+string, '', [np.min(xoff_all), xmax], 'default', legend=True)
    plt.savefig('../figures/'+filestring+'_function.png', facecolor='w', edgecolor='w', bbox_inches='tight')
    return ax


def axId(i):
    """Map a flat index 0..3 to (row, col) in a 2x2 axes grid."""
    if i == 0:
        m, n = 0, 0
    if i == 1:
        m, n = 0, 1
    if i == 2:
        m, n = 1, 0
    if i == 3:
        m, n = 1, 1
    # NOTE(review): i outside 0..3 leaves m, n unassigned (UnboundLocalError).
    return int(m), int(n)


def plotPdf(ax, arr, string, color):
    """Overplot the normal PDF evaluated at the sorted values of *arr*."""
    pdf_arr = norm.pdf(np.sort(arr))
    ax.plot(np.sort(arr), pdf_arr, '-', color=color, label=r'PDF of '+string, lw=4)
    return


def saveFig(filename):
    """Save the current figure under ../figures/ with tight bounding box."""
    plt.savefig('../figures/'+filename, facecolor='w', edgecolor='w', bbox_inches='tight')
    return


def plotContour(u_pix, matrix_2D, xmin=10, xmax=150, ymin=0, ymax=2, ax=None, cmap='YlGnBu'):
    """
    Function plots a contour map
    @u_pix :: number of pixels in the FOV; scalar for a square FOV, or a
              2-sequence (nx, ny) for a rectangular FOV
    @Returns :: (ax, contour set)
    """
    if ax == None:
        fig, ax = plt.subplots(1,1,figsize=(7,6))

    # square FOV: grid spans 0..u_pix in both directions
    if isinstance(u_pix, (int, float)):
        X, Y = np.meshgrid(np.linspace(0, u_pix, u_pix), np.linspace(0, u_pix, u_pix))
    if isinstance(u_pix, (list, tuple, np.ndarray)):
        # if FOV is a rectangle
        X, Y = np.meshgrid(np.linspace(xmin, xmax, u_pix[0]), np.linspace(ymin, ymax, u_pix[1]))

    plot = ax.contourf(X, Y, matrix_2D, cmap=cmap, origin='image')
    return ax, plot


def labelMZTmmXoff(ax, ylabel, redshift_limit=2):
    """Apply the standard mass/redshift/t_MM/Xoff labels to a 2x2 axes grid."""
    setLabel(ax[0, 0], r'Stellar mass, $\log{M^*}$', ylabel, '', 'default', 'default', legend=False)
    setLabel(ax[0, 1], 'Redshift, $z$', '', '', [0, redshift_limit], 'default', legend=False)
    setLabel(ax[1, 0], r'$t_{\rm MM}$', ylabel, '', 'default', 'default', legend=False)
    ax[1,0].set_xscale('log')
    setLabel(ax[1, 1], r'$\tilde{X}_{\rm off}$', '', '', 'default', 'default', legend=False)
    return


def plotBinsMZdistribution(mz_mat_tmm0, mz_mat_tmm1, tmm_bins, param=r'$t_{\rm MM} = $'):
    """Plot the mass-ratio / mean-redshift contour maps for two t_MM bins
    side by side on a 2x2 grid (one column per bin)."""
    fig, ax = plt.subplots(2,2,figsize=(15,15))

    # left column: first t_MM bin (mass ratio on top, mean redshift below)
    ax0, pt0 = plotContour((mz_mat_tmm0[0].shape[1], mz_mat_tmm0[0].shape[0]), mz_mat_tmm0[0], ymin=0.8, ymax=1.3, cmap='terrain', ax=ax[0, 0])
    ax1, pt1 = plotContour((mz_mat_tmm0[1].shape[1], mz_mat_tmm0[1].shape[0]), mz_mat_tmm0[1], ymin=0., ymax=2, cmap='terrain', ax=ax[1, 0])
    setLabel(ax[0, 0], '', 'Mass ratio', param+' %.2f - %.2f'%(tmm_bins[0][0], tmm_bins[0][1]), 'default', 'default', legend=False)
    setLabel(ax[1, 0], r'Separation, $r_p$ [kpc]', 'Mean redshift', '', 'default', 'default', legend=False)

    # right column: second t_MM bin
    ax2, pt2 = plotContour((mz_mat_tmm1[0].shape[1], mz_mat_tmm1[0].shape[0]), mz_mat_tmm1[0], ymin=0.8, ymax=1.3, cmap='terrain', ax=ax[0, 1])
    ax3, pt3 = plotContour((mz_mat_tmm1[1].shape[1], mz_mat_tmm1[1].shape[0]), mz_mat_tmm1[1], ymin=0., ymax=2, cmap='terrain', ax=ax[1, 1])
    setLabel(ax[0, 1], '', '', param+ ' %.2f - %.2f'%(tmm_bins[1][0], tmm_bins[1][1]), 'default', 'default', legend=False)
    setLabel(ax[1, 1], r'Separation, $r_p$ [kpc]', '', '', 'default', 'default', legend=False)
    return


def snsPlotLabels():
    """Apply the standard t_MM vs Xoff axis labels/tick sizes to the current figure."""
    plt.xlabel(r'$t_{\rm MM}$ [Gyr]', fontsize=20)
    plt.ylabel(r'$\tilde{X}_{\rm off}$', fontsize=20)
    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)
    return


def plotGaussianKde(param_arr, Z, string, i, j, set_xy_lim=True):
    """Scatter parameters i vs j and overlay the KDE density image *Z*.

    @param param_arr :: sequence of parameter arrays
    @param Z :: precomputed 2D KDE grid (rotated for imshow display)
    @param string :: axis-label strings, indexed like param_arr
    """
    xmin, xmax = np.min(param_arr[i]), np.max(param_arr[i])
    ymin, ymax = np.min(param_arr[j]), np.max(param_arr[j])

    fig, ax = plt.subplots(1,1,figsize=(5, 5))
    ax.plot(param_arr[i], param_arr[j], 'k.', markersize=.02)
    ax.imshow(np.rot90(Z), cmap=plt.cm.gist_earth_r, extent=[xmin, xmax, ymin, ymax])
    if set_xy_lim:
        ax.set_xlim([xmin, xmax])
        ax.set_ylim([ymin, ymax])
    setLabel(ax, string[i], string[j], '', 'default', 'default', legend=False)
    return ax


def plotModelResults(ax, hd_halo, pairs_all, pairs_selected, vol):
    """
    Plots the models generated for bins of Tmm and Xoff
    Returns the ratio of selected to all pairs per radius bin.
    """
    # get shell volume and projected radius bins [Mpc]
    r_p, shell_volume = aimm.shellVolume()

    # plotting the cumulative pairs
    # NOTE(review): this local `norm` shadows scipy.stats.norm imported above.
    norm = vol*len(hd_halo)
    np_all, np_selected = pairs_all/norm, pairs_selected[1]/norm
    ax[0].plot( (1e3*r_p), (np_selected), 'rX', ls = '--', ms=9, label='Selected pairs')
    ax[0].plot( (1e3*r_p), (np_all), 'kX', ls = '--', label = 'All pairs', ms = 9)
    setLabel(ax[0], r'', r'Cumulative $n_{\rm halo\ pairs}}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)

    # plotting the pairs in bins of radius
    np_all_bins, np_all_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_all)
    np_selected_bins, np_selected_bins_err = cswl.nPairsToFracPairs(hd_halo, pairs_selected[1])
    _ = plotFpairs(ax[1], r_p, np_all_bins, np_all_bins_err, label = 'All pairs', color='k')
    _ = plotFpairs(ax[1], r_p, np_selected_bins, np_selected_bins_err, label = 'Selected pairs')
    ax[1].set_yscale('log')
    setLabel(ax[1], r'', r'$n_{\rm halo\ pairs}}$ [Mpc$^{-3}$]', '', 'default', 'default', legend=True)

    # plotting the pairs in bins with respect to the control
    _ = plotFpairs(ax[2], r_p, np_selected_bins/np_all_bins, np_selected_bins_err, label='wrt all pairs', color='orange')
    setLabel(ax[2], r'Separation, $r$ [kpc]', r'Fraction of pairs, $f_{\rm halo\ pairs}}$ ', '', 'default', 'default', legend=False)
    return np_selected_bins/np_all_bins
13,606
5,735
"""Small lightweight utilities used frequently in GOATOOLS.""" __copyright__ = "Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved." __author__ = "DV Klopfenstein" def extract_kwargs(args, exp_keys, exp_elems): """Return user-specified keyword args in a dictionary and a set (for True/False items).""" arg_dict = {} # For arguments that have values arg_set = set() # For arguments that are True or False (present in set if True) for key, val in args.items(): if exp_keys is not None and key in exp_keys and val: arg_dict[key] = val elif exp_elems is not None and key in exp_elems and val: arg_set.add(key) return {'dict':arg_dict, 'set':arg_set} def get_kwargs_set(args, exp_elem2dflt): """Return user-specified keyword args in a dictionary and a set (for True/False items).""" arg_set = set() # For arguments that are True or False (present in set if True) # Add user items if True for key, val in args.items(): if exp_elem2dflt is not None and key in exp_elem2dflt and val: arg_set.add(key) # Add defaults if needed for key, dfltval in exp_elem2dflt.items(): if dfltval and key not in arg_set: arg_set.add(key) return arg_set def get_kwargs(args, exp_keys, exp_elems): """Return user-specified keyword args in a dictionary and a set (for True/False items).""" arg_dict = {} # For arguments that have values for key, val in args.items(): if exp_keys is not None and key in exp_keys and val: arg_dict[key] = val elif exp_elems is not None and key in exp_elems and val: arg_dict[key] = True return arg_dict # Copyright (C) 2016-2018, DV Klopfenstein, H Tang, All rights reserved.
1,797
585
# NICOS setup file: device that monitors the EPICS-to-Kafka Forwarder status.
description = 'Monitors the status of the Forwarder'

devices = dict(
    # NOTE(review): `device(...)` is injected by the NICOS setup loader at
    # read time; this file is not importable as a plain Python module.
    KafkaForwarder=device(
        'nicos_ess.devices.forwarder.EpicsKafkaForwarder',
        description='Monitors the status of the Forwarder',
        # Kafka topic the forwarder publishes its status messages on
        statustopic='UTGARD_forwarderStatus',
        # Kafka broker address(es) to connect to
        brokers=['172.30.242.20:9092']),
)
309
114
from argparse import ArgumentParser
import logging
from .config import Config
import sys


def main():
    """CLI entry point: parse arguments, configure logging, build the config tree."""
    parser = ArgumentParser()
    parser.add_argument("cfgfile", nargs="+", help="Codetree configuration file")
    # NOTE(review): "-f" shares the mutually exclusive group with -v/-q, so
    # it cannot be combined with either flag — confirm this is intentional.
    group = parser.add_mutually_exclusive_group(required=False)
    group.add_argument("-v", "--verbose", action="store_true", default=False)
    group.add_argument("-q", "--quiet", action="store_true", default=False)
    group.add_argument("-f", "--fatality", action="store_true", default=False,
                       help="Any error is fatal")
    options = parser.parse_args()

    # Default: plain INFO messages. -v switches to DEBUG with level names;
    # -q silences everything below CRITICAL.
    fmt, level = "%(message)s", logging.INFO
    if options.verbose:
        fmt, level = "%(levelname)s: %(message)s", logging.DEBUG
    if options.quiet:
        level = logging.CRITICAL
    logging.basicConfig(format=fmt, level=level)

    # Exit status mirrors build success: 0 on success, 1 on failure.
    sys.exit(0 if Config(options.cfgfile).build(options.fatality) else 1)
985
322
""" Methods for building Cognoma mutation classifiers Usage - Import only """ import pandas as pd from sklearn.metrics import roc_curve, roc_auc_score import plotnine as gg def theme_cognoma(fontsize_mult=1): return (gg.theme_bw(base_size=14 * fontsize_mult) + gg.theme(line=gg.element_line(color="#4d4d4d"), rect=gg.element_rect(fill="white", color=None), text=gg.element_text(color="black"), axis_ticks=gg.element_line(color="#4d4d4d"), legend_key=gg.element_rect(color=None), panel_border=gg.element_rect(color="#4d4d4d"), panel_grid=gg.element_line(color="#b3b3b3"), panel_grid_major_x=gg.element_blank(), panel_grid_minor=gg.element_blank(), strip_background=gg.element_rect(fill="#FEF2E2", color="#4d4d4d"), axis_text=gg.element_text(size=12 * fontsize_mult, color="#4d4d4d"), axis_title_x=gg.element_text(size=13 * fontsize_mult, color="#4d4d4d"), axis_title_y=gg.element_text(size=13 * fontsize_mult, color="#4d4d4d"))) def get_model_coefficients(classifier, feature_set, covariate_names): """ Extract the feature names and associate them with the coefficient values in the final classifier object. 
* Only works for expressions only model with PCA, covariates only model, and a combined model * Assumes the PCA features come before any covariates that are included * Sorts the final dataframe by the absolute value of the coefficients Args: classifier: the final sklearn classifier object feature_set: string of the model's name {expressions, covariates, full} covariate_names: list of the names of the covariate features matrix Returns: pandas.DataFrame: mapping of feature name to coefficient value """ import pandas as pd import numpy as np coefs = classifier.coef_[0] if feature_set == 'expressions': features = ['PCA_%d' % cf for cf in range(len(coefs))] elif feature_set == 'covariates': features = covariate_names else: features = ['PCA_%d' % cf for cf in range(len(coefs) - len(covariate_names))] features.extend(covariate_names) coef_df = pd.DataFrame({'feature': features, 'weight': coefs}) coef_df['abs'] = coef_df['weight'].abs() coef_df = coef_df.sort_values('abs', ascending=False) coef_df['feature_set'] = feature_set return coef_df def get_genes_coefficients(pca_object, classifier_object, expression_df, expression_genes_df, num_covariates=None): """Identify gene coefficients from classifier after pca. Args: pca_object: The pca object from running pca on the expression_df. classifier_object: The logistic regression classifier object. expression_df: The original (pre-pca) expression data frame. expression_genes_df: The "expression_genes" dataframe used for gene names. num_covariates: Optional, only needed if PCA was only performed on a subset of the features. This should be the number of features that PCA was not performed on. This function assumes that the covariates features were at the end. Returns: gene_coefficients_df: A dataframe with entreze gene-ID, gene name, coefficient abbsolute value of coefficient, and gene description. The dataframe is sorted by absolute value of coefficient. """ # Get the classifier coefficients. 
if num_covariates: coefficients = classifier_object.coef_[0][0:-num_covariates] else: coefficients = classifier_object.coef_[0] # Get the pca weights weights = pca_object.components_ # Combine the coefficients and weights gene_coefficients = weights.T @ coefficients.T # Create the dataframe with correct index gene_coefficients_df = pd.DataFrame(gene_coefficients, columns=['weight']) gene_coefficients_df.index = expression_df.columns gene_coefficients_df.index.name = 'entrez_id' expression_genes_df.index = expression_genes_df.index.map(str) # Add gene symbol and description gene_coefficients_df['symbol'] = expression_genes_df['symbol'] gene_coefficients_df['description'] = expression_genes_df['description'] # Add absolute value and sort by highest absolute value. gene_coefficients_df['abs'] = gene_coefficients_df['weight'].abs() gene_coefficients_df.sort_values(by='abs', ascending=False, inplace=True) # Reorder columns gene_coefficients_df = gene_coefficients_df[['symbol', 'weight', 'abs', 'description']] return(gene_coefficients_df) def select_feature_set_columns(X, feature_set, n_covariates): """ Select the feature set for the different models within the pipeline """ if feature_set == 'covariates': return X[:, :n_covariates] if feature_set == 'expressions': return X[:, n_covariates:] raise ValueError('feature_set not supported: {}'.format(feature_set)) def get_threshold_metrics(y_true, y_pred): roc_columns = ['fpr', 'tpr', 'threshold'] roc_items = zip(roc_columns, roc_curve(y_true, y_pred)) roc_df = pd.DataFrame.from_items(roc_items) auroc = roc_auc_score(y_true, y_pred) return {'auroc': auroc, 'roc_df': roc_df}
5,862
1,716
"""Dump the `fossil help` text for every known fossil subcommand into one file."""
from subprocess import run

# Every fossil subcommand to collect help text for (snapshot 2021-04-29).
cmds = [
    "3-way-merge", "ci", "help", "push", "stash",
    "add", "clean", "hook", "rebuild", "status",
    "addremove", "clone", "http", "reconstruct", "sync",
    "alerts", "close", "import", "redo", "tag",
    "all", "co", "info", "remote", "tarball",
    "amend", "commit", "init", "remote-url", "ticket",
    "annotate", "configuration", "interwiki", "rename", "timeline",
    "artifact", "dbstat", "json", "reparent", "tls-config",
    "attachment", "deconstruct", "leaves", "revert", "touch",
    "backoffice", "delete", "login-group", "rm", "ui",
    "backup", "descendants", "ls", "rss", "undo",
    "bisect", "diff", "md5sum", "scrub", "unpublished",
    "blame", "export", "merge", "search", "unset",
    "branch", "extras", "mv", "server", "unversioned",
    "bundle", "finfo", "new", "settings", "update",
    "cache", "forget", "open", "sha1sum", "user",
    "cat", "fts-config", "pikchr", "sha3sum", "uv",
    "cgi", "gdiff", "praise", "shell", "version",
    "changes", "git", "publish", "sql", "whatis",
    "chat", "grep", "pull", "sqlar", "wiki",
    "checkout", "hash-policy", "purge", "sqlite3", "zip",
]

# Concatenate each command's help output into a single org-mode file.
# (Output filename keeps the original's "fossile" spelling.)
with open("fossile-cmds-help.org", "w") as f:
    for c in cmds:
        # capture_output=True collects stdout so it can be written below
        d = run(
            ["/home/osboxes/src/fossil-snapshot-20210429/fossil", "help", c],
            capture_output=True,
        )
        f.write(d.stdout.decode("utf-8"))
1,740
706
_msvc_copts = ["/std:c++17"] _clang_cl_copts = ["/std:c++17"] _gcc_copts = ["-std=c++17"] copts = select({ "@bazel_tools//tools/cpp:msvc": _msvc_copts, "@bazel_tools//tools/cpp:clang-cl": _clang_cl_copts, "//conditions:default": _gcc_copts, })
257
125
"""Verify a Trillian log: check the signed log root, then rebuild and
compare the Merkle tree root hash."""
import logging
import sys

from trillian import TrillianLog
from print_helper import Print
from pprint import pprint


def main(argv):
    """Run the audit against the Trillian log configured via environment variables."""
    logging.basicConfig(level=logging.INFO)
    trillian_log = TrillianLog.load_from_environment()

    # Step 1: validate the signature on the signed log root.
    Print.status('Checking signature on signed log root')
    validated_log_root = trillian_log.get_log_root()
    Print.tick('Log root is signed correctly by public key')

    # * do full audit between hash[previous] and hash[current]
    # * do consistency check between hash[previous] and hash[current]

    # Step 2: recompute the Merkle root from all entries and compare it to
    # the root hash declared in the validated log root.
    Print.status('Rebuilding Merkle tree from {} entries to get root '
                 'hash'.format(validated_log_root.tree_size))
    Print.bullet('Looking for root hash: {}'.format(
        validated_log_root.root_hash))

    if trillian_log.full_audit(validated_log_root):
        Print.bullet('Calculated root hash: {}'.format(
            validated_log_root.root_hash))
        Print.tick('Root hashes match, Merkle tree appears correct')

    # Step 3: display the most recent log entry for manual inspection.
    Print.status('Showing latest log entry')
    Print.normal(str(trillian_log.latest().json()))
    print()


if __name__ == '__main__':
    main(sys.argv)
1,154
362
""" Units module URLs """ from django.conf.urls import url, include from django.urls import path from rest_framework import routers from .viewsets import UnitSystemViewset, UnitViewset, \ ConvertView, CustomUnitViewSet from geocurrency.calculations.viewsets import ValidateViewSet, CalculationView app_name = 'units' router = routers.DefaultRouter() router.register(r'', UnitSystemViewset, basename='unit_systems') router.register(r'(?P<system_name>\w+)/units', UnitViewset, basename='units') router.register(r'(?P<system_name>\w+)/custom', CustomUnitViewSet, basename='custom') urlpatterns = [ path('convert/', ConvertView.as_view()), path('<str:unit_system>/formulas/validate/', ValidateViewSet.as_view()), path('<str:unit_system>/formulas/calculate/', CalculationView.as_view()), url(r'^', include(router.urls)), ]
875
277
from tabular import *
22
7
"""Project URL configuration: Django admin plus the REST API root."""
from django.contrib import admin
from django.urls import include, path
from rest_framework import routers

from .shifts.views import ShiftView
from .workers.views import WorkerView

# DRF router generates the standard list/detail routes for both viewsets.
router = routers.DefaultRouter()
router.register("workers", WorkerView)
router.register("shifts", ShiftView)

urlpatterns = [
    path("admin/", admin.site.urls),
    # API mounted at the site root
    path("", include(router.urls)),
]
383
115
from __future__ import division import numpy as np from scipy import integrate __all__ = ['area', 'simple'] def simple(p): pass def area(p): cumul = np.hstack(([0], integrate.cumtrapz(np.abs(np.gradient(p))))) return cumul / max(cumul)
257
96
# randominette.py — by ayalla, sotto & dutesier (42 Lisbon)
# Created: 2022/01/13  Updated: 2022/01/20
"""Pick random victims among the active users of a 42 campus cluster.

Flags (all packed into argv[1], e.g. "-csl"):
  c  compact output (locations only)
  s  prompt for campus/cluster/delay instead of the Lisbon defaults
  l  list every active user before picking
  r  also pick everyone in the chosen user's row
  p  also pick a random percentage of all users
"""

import requests
import json
import random
import sys
import pprint
from decouple import config
import time


def main():
    """Query the 42 API for active cluster users and pick random ones."""
    my_time = 1
    argc = len(sys.argv)
    pmode = 0
    # "c" anywhere after position 0 in the flag string enables compact mode
    if argc > 1 and sys.argv[1].find("c") > 0:
        pmode = 1
    if len(sys.argv) > 1 and sys.argv[1].find("s") > 0:
        # Get Campus ID and Cluster from user
        campus = int(input("Campus ID (38 for Lisbon): "))
        cluster = int(input("Cluster: "))
        my_time = int(input("Time between requests (change at your own risk): "))
    else:
        campus = 38
        cluster = 1
    if my_time < 0:
        my_time = 1
        print("We're not time travelers - time set to 1 second")

    # API credentials come from the environment / .env via python-decouple
    client_id = config('42-UID')
    client_secret = config('42-SECRET')

    # Get authorization token (OAuth2 client-credentials flow)
    token_url = "https://api.intra.42.fr/oauth/token"
    data = {
        "grant_type": "client_credentials",
        "client_id": client_id,
        "client_secret": client_secret
    }
    ret = requests.post(
        token_url,
        data,
    )
    if ret.status_code != 200:
        return print(f"Error: Failed to get OAUTH2 token: {ret.status_code}")
    ret = ret.json()

    # Set pagination
    page = {
        "number": 1,
        "size": 100
    }
    # Pass our authorization token as a header
    headers = {
        "Authorization": f"{ret['token_type']} {ret['access_token']}",
    }
    # Pass our pagination definitions as a dict
    params = {
        "page": page
    }

    time.sleep(my_time)
    # Get the active locations for this cluster from the API
    url = f'https://api.intra.42.fr/v2/campus/{campus}/locations?sort=-end_at,host&filter[active]=true&range[host]=c{cluster}, c{cluster + 1}r00s00'
    ret = requests.get(url, headers=headers, json=params)
    if ret.status_code != 200:
        return print(f"Error: Failed to GET from {url}: Got status code {ret.status_code}")
    users_in_campus = ret.json()

    i = 0
    if len(sys.argv) > 1 and sys.argv[1].find("l") > 0:
        print_user_info(users_in_campus)
    if len(users_in_campus) == 0:
        return print(f"There are currently {i} active users in cluster {cluster} at campus {campus}")

    # Check if we have all elements or if there are more pages
    if 'Link' in ret.headers and len(users_in_campus) == page['size']:
        while True:
            time.sleep(my_time)
            page['number'] = page['number'] + 1
            ret = requests.get(url, headers=headers, json=params)
            second_page = ret.json()
            users_in_campus = users_in_campus + second_page
            # a short page means we have reached the end
            if len(second_page) != page['size']:
                break

    # Report the amount of active users
    i = len(users_in_campus)
    print(f"There are currently {i} active users in cluster {cluster} at campus {campus}")
    if i == 0:
        return

    chosen_one = random_user(users_in_campus)
    print("The Chosen One is: ")
    if pmode:
        print(users_in_campus[chosen_one]['user']['location'])
    else:
        print(users_in_campus[chosen_one]['user']['login'])
        print(users_in_campus[chosen_one]['user']['location'])

    # Pick all users from the random user's row
    if len(sys.argv) > 1 and sys.argv[1].find("r") > 0:
        row = get_user_row(users_in_campus[chosen_one]['user']['location'])
        if row:
            print(f"The Chosen Row is {row}, and the unlucky ones are: ")
            for student in users_in_campus:
                if get_user_row(student['user']['location']) == row:
                    if pmode:
                        print(student['user']['location'], end=" ")
                    else:
                        print(student['user']['login'])
                        print(student['user']['location'])
            if pmode:
                print("")

    # Pick a random percentage of users to be randomly selected
    if len(sys.argv) > 1 and sys.argv[1].find("p") > 0:
        while True:
            percentage = int(input("Percentage of victims (%): "))
            if percentage <= 100 and percentage > 0:
                break
            else:
                print("Percentage must be between 0 and 100")
        number_users = int(len(users_in_campus) * (percentage / 100))
        if number_users <= 0:
            return print(f"The percentage {percentage}% translates to a total of 0 users")
        sample = random_users(users_in_campus, number_users)
        # Print chosen users
        for n in sample:
            if pmode:
                print(users_in_campus[n]['user']['location'], end=" ")
            else:
                print(users_in_campus[n]['user']['login'])
                print(users_in_campus[n]['user']['location'])
        if pmode:
            print("")


def random_users(users_in_campus, nu):
    """Return *nu* distinct random indices into users_in_campus."""
    total = len(users_in_campus)
    if total == 1:
        return [0]
    return random.sample(range(total), nu)


def random_user(users_in_campus):
    """Return the index of one uniformly random user.

    (fix) the original used random.randrange(0, i - 1), whose upper bound is
    exclusive, so the last user could never be chosen; randrange(i) covers
    every index 0..i-1.
    """
    return random.randrange(len(users_in_campus))


def print_user_info(users_in_campus):
    """Print login and location for every active user."""
    for student in users_in_campus:
        print(f"user: {student['user']['login']}\tloc: {student['user']['location']}")


def get_user_row(location):
    """Extract the row part ('r<N>') from a location like 'c1r2s3'."""
    return location[location.find("r"):location.find("s")]


if __name__ == '__main__':
    main()
6,451
2,019
# -*- coding: utf-8 -*-
"""This module provides a way to initialize components for processing pipeline.

Init functions are stored into a dictionary which can be used by `Pipeline`
to load components on demand.
"""
from .pipeline import Byte2html, Html2text, Html2image, Html2meta, Text2title


def build_factories():
    """Creates default factories for Processor.

    Returns:
        dict: component name -> factory callable accepting a config object.
    """
    # (fix) the original listed the 'text2title' key twice; duplicate dict
    # keys silently overwrite each other, so the redundant entry is removed.
    factories = {
        'byte2html': lambda config: Byte2html(config),
        'html2text': lambda config: Html2text(config),
        'html2image': lambda config: Html2image(config),
        'html2meta': lambda config: Html2meta(config),
        'text2title': lambda config: Text2title(config),
    }
    return factories
750
217
def sampler(self, z, y=None):
    '''Generate images for a batch of latent vectors `z`, optionally
    conditioned on labels `y`, reusing the trained generator weights.
    Batch norms run in inference mode (train=False).'''
    with tf.variable_scope("generator") as scope:
        # we hope the weights defined in generator to be reused
        scope.reuse_variables()
        if not self.y_dim:
            # Output size halved at each of the four deconv stages.
            s_h, s_w = self.output_height, self.output_width
            s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
            s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
            s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
            s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

            # project `z` and reshape
            h0 = tf.reshape(
                linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
                [-1, s_h16, s_w16, self.gf_dim * 8])
            h0 = tf.nn.relu(self.g_bn0(h0, train=False))

            # FIX: the original referenced a free name `batch_size`, which is
            # not defined in this scope (NameError at graph-build time); the
            # batch size is an attribute of the model instance, exactly as
            # sampler1 below derives its own sample count.
            h1 = deconv2d(h0, [self.batch_size, s_h8, s_w8, self.gf_dim*4], name='g_h1')
            h1 = tf.nn.relu(self.g_bn1(h1, train=False))

            h2 = deconv2d(h1, [self.batch_size, s_h4, s_w4, self.gf_dim*2], name='g_h2')
            h2 = tf.nn.relu(self.g_bn2(h2, train=False))

            h3 = deconv2d(h2, [self.batch_size, s_h2, s_w2, self.gf_dim*1], name='g_h3')
            h3 = tf.nn.relu(self.g_bn3(h3, train=False))

            h4 = deconv2d(h3, [self.batch_size, s_h, s_w, self.c_dim], name='g_h4')

            return tf.nn.tanh(h4)
        else:
            # Conditional variant: two deconv stages only.
            s_h, s_w = self.output_height, self.output_width
            s_h2, s_h4 = int(s_h/2), int(s_h/4)
            s_w2, s_w4 = int(s_w/2), int(s_w/4)

            # yb = tf.reshape(y, [-1, 1, 1, self.y_dim])
            yb = tf.reshape(y, [self.batch_size, 1, 1, self.y_dim])
            z = concat([z, y], 1)

            h0 = tf.nn.relu(self.g_bn0(linear(z, self.gfc_dim, 'g_h0_lin'), train=False))
            h0 = concat([h0, y], 1)

            h1 = tf.nn.relu(self.g_bn1(
                linear(h0, self.gf_dim*2*s_h4*s_w4, 'g_h1_lin'), train=False))
            h1 = tf.reshape(h1, [self.batch_size, s_h4, s_w4, self.gf_dim * 2])
            h1 = conv_cond_concat(h1, yb)

            h2 = tf.nn.relu(self.g_bn2(
                deconv2d(h1, [self.batch_size, s_h2, s_w2, self.gf_dim * 2],
                         name='g_h2'), train=False))
            h2 = conv_cond_concat(h2, yb)

            return tf.nn.sigmoid(
                deconv2d(h2, [self.batch_size, s_h, s_w, self.c_dim], name='g_h3'))

def sampler1(self, z, y=None, reuse=True):
    '''Generate a given number of samples using z.
    The first dimension of z is the number of samples'''
    with tf.variable_scope("generator") as scope:
        # we hope the weights defined in generator to be reused
        if reuse:
            scope.reuse_variables()
        # Sample count comes from z itself, so any batch size works here.
        num_samples = z.get_shape().as_list()[0]

        s_h, s_w = self.output_height, self.output_width
        s_h2, s_w2 = conv_out_size_same(s_h, 2), conv_out_size_same(s_w, 2)
        s_h4, s_w4 = conv_out_size_same(s_h2, 2), conv_out_size_same(s_w2, 2)
        s_h8, s_w8 = conv_out_size_same(s_h4, 2), conv_out_size_same(s_w4, 2)
        s_h16, s_w16 = conv_out_size_same(s_h8, 2), conv_out_size_same(s_w8, 2)

        # project `z` and reshape
        h0 = tf.reshape(
            linear(z, self.gf_dim*8*s_h16*s_w16, 'g_h0_lin'),
            [-1, s_h16, s_w16, self.gf_dim * 8])
        h0 = tf.nn.relu(self.g_bn0(h0, train=False))

        h1 = deconv2d(h0, [num_samples, s_h8, s_w8, self.gf_dim*4], name='g_h1')
        h1 = tf.nn.relu(self.g_bn1(h1, train=False))

        h2 = deconv2d(h1, [num_samples, s_h4, s_w4, self.gf_dim*2], name='g_h2')
        h2 = tf.nn.relu(self.g_bn2(h2, train=False))

        h3 = deconv2d(h2, [num_samples, s_h2, s_w2, self.gf_dim*1], name='g_h3')
        h3 = tf.nn.relu(self.g_bn3(h3, train=False))

        h4 = deconv2d(h3, [num_samples, s_h, s_w, self.c_dim], name='g_h4')

        return tf.nn.tanh(h4)
3,673
1,786
## how we measure the similarity between two lists w/ IC per each node
## we have a DAG structure
## goal is for each Gene !! output a 'semantic distance'
# based on https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2756558/ [but different]
# with this two equal nodes will have distance '0'
# maximum distance is -2log(1/tot) ~~ 25
# NOTE: legacy Python 2 module (print statements, cPickle).
import networkx as nx
import cPickle as pickle
import numpy as np
import math
import random


def calc_me(DG, a, b, PW=False):
    """Return the IC-based semantic distance between HPO terms *a* and *b*.

    Conceptually IC(a) + IC(b) - 2*IC(MICA) (MICA = maximum-IC common
    ancestor), implemented as a weighted shortest-path length modulo the
    per-hop offset baked into the edge weights.
    PW is accepted but unused in this implementation.
    """
    # actual calculation of IC distance
    # return IC(a) + IC(b) -2*IC(MICA) # MICA = Max IC Ancestor
    if any(x not in DG.nodes() for x in [a, b]):
        # means one key is not in the DG nodes, it can happen so we need to
        # be safe: return max possible value
        return 2*(max([d['IC'] for n, d in DG.nodes_iter(data=True)]))
    # check for obsolete nodes: substitute by the replacement if obsolete
    a = DG.node[a].get('replaced_by', a)
    b = DG.node[b].get('replaced_by', b)
    if any(x not in DG.nodes() for x in [a, b]):
        # the replacement itself may be missing -- same safe fallback
        return 2*(max([d['IC'] for n, d in DG.nodes_iter(data=True)]))
    if a == b:
        return 0.0
    # (original direct MICA implementation kept for reference)
    # IC_a = DG.node[a]['IC']
    # IC_b = DG.node[b]['IC']
    #
    # ancestors_a = list(nx.ancestors(DG,a))
    # ancestors_b = list(nx.ancestors(DG,b))
    #
    # ancestors_a.append(a)
    # ancestors_b.append(b)
    #
    # common_ancestors = list(set(ancestors_a) & set(ancestors_b))
    # ancestors_val = [DG.node[x]['IC'] for x in common_ancestors]
    #
    # distance = IC_a + IC_b -2.0*max(ancestors_val)
    offset = 1000
    # Each edge weight is offset + |IC delta|; the modulo strips the
    # accumulated per-hop offsets.
    # NOTE(review): this assumes the IC part of the path sum stays below
    # `offset` -- confirm with the edge weights set in generate_HPO_graph.
    distance = nx.shortest_path_length(DG, a, b, weight='dist') % offset
    print distance
    return distance


def list_distance(DG, Q, G, Query_distances):
    """Similarity between a query HPO list *Q* and a gene's HPO list *G*.

    Returns (similarity, Query_distances) with similarity in [0, 1]
    (1 = identical lists). Query_distances is a cache dict
    {HPO: min distance to any query term} plus a 'maxval' entry; pass 0
    on the first call and feed the returned dict back on later calls.
    """
    # idea is:
    # for each query HPO calculate all distances, store them in a dict with
    # HPOs as keys; value is the minimum value of distance on the query HPOs.
    # So then for the list of genes it's enough to collect the values at
    # column names, and if missing set the max value.
    # cover cases where no HPO from Query, or no HPO provided, or no HPO
    # associated with the gene
    if 'NONE' in Q or 'NONE' in G:
        return (0, Query_distances)
    if len(Q) < 1 or len(G) < 1:
        return (0, Query_distances)
    offset = 1000
    if Query_distances == 0:
        # build the cache: one single-source shortest path per query term
        for k_q in Q:
            if k_q not in DG.nodes():
                # missing node (obsolete not updated or just wrong value)
                continue
            k_q = DG.node[k_q].get('replaced_by', k_q)
            distance = nx.shortest_path_length(DG, k_q, weight='dist')
            if Query_distances == 0:
                Query_distances = {key: float(value) % offset
                                   for (key, value) in distance.items()}
                print 'calc whole dist'
            else:
                for k in Query_distances.keys():
                    try:
                        Query_distances[k] = min([Query_distances[k],
                                                  float(distance[k]) % offset])
                    except:
                        # term unreachable from k_q: keep the previous value
                        Query_distances[k] = float(Query_distances[k]) % offset
        if Query_distances == 0:
            # can happen when the original list has no updated HPO or wrong values
            return (0, 0)
        Query_distances['maxval'] = 2*(max([d['IC'] for n, d in DG.nodes_iter(data=True)]))
    # now I have the query distances: map the gene's HPOs and extract values;
    # a missing term gets the maximum value
    #results = []
    maxval = Query_distances['maxval']
    results = [Query_distances.get(q_g, maxval) for q_g in G]
    #for q_g in G:
    #    q_g = DG.node[q_g].get('replaced_by',q_g)
    #    results.append(Query_distances.get(q_g,2*(max([d['IC'] for n,d in DG.nodes_iter(data=True)]))))
    final_value = np.mean(results)/maxval
    if final_value > 1:
        # borderline cases where the path goes up and down to reach the node
        final_value = 1
    return (1-final_value, Query_distances)


def calc_distance(DG, query, gene, Query_distances=0):
    """DEPRECATED pairwise version of list_distance.

    Distance(Query, Gene): Query = HPO list from user, Gene = HPO list
    associated to a gene.  Asymmetric; returns a similarity in [0, 1].
    """
    ### DEPRECATED
    if len(query)*len(gene) == 0:
        # one of the lists is empty at least
        return 0
    # avg [ sum_{t_1 in Q} min_{t_2 in G} ( IC(t_1) + IC(t_2) - 2*IC(MICA(t_1,t_2)) ) ]
    # graph contains IC
    distances = []
    distances = [float(min([calc_me(DG, qg, x) for x in gene])) for qg in query]
    final_value = np.mean(distances)/(2*(max([d['IC'] for n, d in DG.nodes_iter(data=True)])))
    #print distances
    # the division is to ensure a maximum of 1
    #print final_value
    return (1-final_value)


def check_qualtiy(DG):
    """Sanity check: every ancestor should have IC <= its descendants.

    Prints each (node, ancestor, IC difference) violation.
    (Function name typo kept for backward compatibility with callers.)
    """
    # find if all ancestors have IC <= sons; if not, why:
    for node in DG:
        ancestors = nx.ancestors(DG, node)
        ancestors_val = [DG.node[x]['IC'] - DG.node[node]['IC'] for x in ancestors]
        problematic = [i for i, e in enumerate(ancestors_val) if e > 0]
        for i in problematic:
            print node
            print list(ancestors)[i]
            print ancestors_val[i]
    return None


def get_DG_edges(HPO, outfile):
    """Parse an hp.obo file and pickle a dict of term -> list of parents.

    The dict also carries two special keys: 'replacements' with
    (obsolete term, replacement) pairs and 'alternatives' with
    (term, alt_id) pairs.
    """
    # This one generates a dict file to generate edges of the HPO graph.
    # download data:
    # wget https://raw.githubusercontent.com/obophenotype/human-phenotype-ontology/master/hp.obo
    # then call this: python ... hp.obo myHPO_edges.pk
    import sys
    import cPickle as pickle
    listfile = HPO
    out_HPO = dict()
    replacements = []
    alternatives = []
    token = False
    obsolete = False
    with open(listfile) as rd:
        for line in rd:
            if line.startswith('id: HP:'):
                # new stanza: flush the previous term (unless obsolete)
                if token and not obsolete:
                    out_HPO[name] = parents
                    if repl != '':
                        replacements.append((name, repl))
                token = True
                name = line.strip().split('id: ')[1]
                parents = []
                repl = ''
                obsolete = False
            elif line.startswith('is_a:'):
                parents.append(line.strip().split('is_a: ')[1].split(' !')[0])
            elif line.startswith('replaced_by:'):
                # add a field to say it's replaced
                repl = line.strip().split('replaced_by: ')[1]
                obsolete = False  # means we can backtrack it
            elif line.startswith('is_obsolete:'):
                obsolete = True
            elif line.startswith('alt_id:'):
                # add alternative nodes, will be later added with
                # replacement field for the most common one
                alt = line.strip().split('alt_id: ')[1]
                alternatives.append((name, alt))
            elif line.startswith('consider:'):
                # add alternative nodes, will be later added with
                # replacement field for the most common one
                alt = line.strip().split('consider: ')[1]
                alternatives.append((alt, name))
                obsolete = False  # means we can backtrack it
    # flush the last stanza
    out_HPO[name] = parents
    out_HPO['replacements'] = replacements
    out_HPO['alternatives'] = alternatives
    pickle.dump(out_HPO, open(outfile, 'wb'))


def generate_HPO_graph(edges_file, counts, output):
    """Build and pickle the HPO graph with per-node IC and edge weights.

    edges_file: pickle produced by get_DG_edges.
    counts: TSV of 'HPO-term<TAB>annotation count'.
    output: destination pickle of the resulting (undirected) graph.
    """
    offset = 1000  # penalization for the distance
    # usage: python me edges.pk ontology.txt graph.pk
    # counts as:
    # wget http://compbio.charite.de/jenkins/job/hpo.annotations/lastStableBuild/artifact/misc/phenotype_annotation.tab
    # awk -F '\t' '{print $5}' < phenotype_annotation.tab | sort |uniq -c | awk '{print $2 "\t" $1}' > HPO_counts.txt
    # idea is a graph with attribute the IC value per node, calculated.
    # generate graph with counts:
    counts_d = dict()
    tot = 0
    with open(counts) as rd:
        for line in rd:
            ff = line.strip().split('\t')
            counts_d[ff[0]] = int(ff[1])
            tot += int(ff[1])
    print tot
    # load dict with edges
    edges = pickle.load(open(edges_file, 'rb'))
    print(len(edges.keys()))
    # get replacements of obsolete nodes
    replacements = dict(edges.get('replacements', []))
    tmpval = edges.pop('replacements', None)
    # let's build a graph
    DG = nx.DiGraph()
    # populate with alternatives;
    # mark alternatives as replaced, it's the same for us.
    alternatives = edges.get('alternatives', [])
    tmpval = edges.pop('alternatives', None)
    # DG.add_edges_from([(1,2)])
    for k in edges.keys():
        DG.add_node(k)
        DG.node[k]['count'] = 0.0
        # edges run parent -> child
        ancestors = [(x, k) for x in edges[k]]
        DG.add_edges_from(ancestors)
        if k in replacements.keys():
            DG.node[k]['replaced_by'] = replacements[k]
        # default IC: as rare as possible (single annotation)
        DG.node[k]['IC'] = -math.log(1.0/tot)
    #nx.set_node_attributes(DG, 0,'count',)
    print 'edges'
    print DG.number_of_edges()
    print 'nodes'
    print DG.number_of_nodes()
    for k in DG.nodes():
        DG.node[k]['count'] = 0.0
    # populate with raw counts
    for k in counts_d.keys():
        DG.node[k]['count'] = counts_d[k]
    DG.nodes(data='count')
    # now fill it with the actual value: IC from the descendant-closure count
    for k in edges.keys():
        desc = nx.descendants(DG, k)
        count = DG.node[k]['count']
        for i in desc:
            count += DG.node[i]['count']
        if count > 0:
            DG.node[k]['IC'] = -math.log(float(count)/tot)
        else:
            # missing nodes, set as rare as possible
            DG.node[k]['IC'] = -math.log(1.0/tot)
        #print k
        #print DG.node[k]
    # add edges weight: per-hop offset plus the IC gap
    for a, b in DG.edges():
        DG[a][b]['dist'] = offset + abs(DG.node[a]['IC'] - DG.node[b]['IC'])
    # alternatives: fill in IC and count
    for node, k in alternatives:
        DG.add_node(k)
        DG.node[k]['count'] = 0.0
        DG.node[k]['replaced_by'] = node
        DG.node[k]['IC'] = DG.node[node]['IC']
    # count is the IC of the node then: IC = information content
    G = DG.to_undirected()
    DG = G
    pickle.dump(DG, open(output, 'wb'))
    return None


def generate_gene_2_HPO_dict(HPO_info, outfile):
    """Pickle a dict mapping gene symbol -> unique list of associated HPO ids."""
    # get mapping gene -> HPOs
    # download from HPO charite ALL_FREQ gene to phenotype:
    # wget http://compbio.charite.de/jenkins/job/hpo.annotations.monthly/lastStableBuild/artifact/annotation/ALL_SOURCES_ALL_FREQUENCIES_genes_to_phenotype.txt
    gene_2_HPO = dict()
    with open(HPO_info) as rd:
        for line in rd:
            if line.startswith('#'):
                pass
            else:
                ff = line.strip().split('\t')
                # Format: entrez-gene-id<tab>entrez-gene-symbol<tab>HPO-Term-Name<tab>HPO-Term-ID
                key = ff[1]
                HPO = ff[-1]
                to_add = gene_2_HPO.get(key, [])
                to_add.append(HPO)
                to_add = list(set(to_add))  # dedupe
                gene_2_HPO[key] = to_add
    pickle.dump(gene_2_HPO, open(outfile, 'wb'))
    return None


def extract_HPO_related_to_gene(gene_2_HPO, gene):
    """Return the HPO list for *gene*.

    gene_2_HPO may be the mapping dict itself or a path to its pickle.
    """
    # gene_2_HPO : dict with [gene] --- HPO_list
    if type(gene_2_HPO) is dict:
        gene_2_HPO_dict = gene_2_HPO
    else:
        gene_2_HPO_dict = pickle.load(open(gene_2_HPO, 'rb'))
    outlist = gene_2_HPO_dict.get(gene, [])
    return outlist


def alter_HPO_list(DG, HPO):
    """Randomly perturb an HPO list (for simulations / negative controls).

    For each term, with equal probability: keep it, swap for a random
    ancestor, swap for a random descendant, or swap for an unrelated term
    (which doubles as removal).  Returns ['NONE'] when everything was lost.
    """
    # way to get a list of HPO; for each one of these you can
    # - keep it
    # - choose an ancestor
    # - choose a descendant
    # - remove it
    # - choose a HPO unrelated
    # all with same priority
    out_list = []
    toadd = ''
    for hpo in HPO:
        if 'NONE' == hpo:
            out_list = []
            break
        # check replacement
        hpo = DG.node[hpo].get('replaced_by', hpo)
        p_val = random.uniform(0, 4)
        if p_val < 1:
            # keep it
            out_list.append(hpo)
            continue
        elif p_val < 2:
            # ancestor
            ancestors = list(nx.ancestors(DG, hpo))
            if len(ancestors) > 0:
                toadd = random.choice(ancestors)
                out_list.append(toadd)
            continue
        elif p_val < 3:
            # descendants, if none, nothing
            desc = list(nx.descendants(DG, hpo))
            if len(desc) > 0:
                toadd = random.choice(desc)
                out_list.append(toadd)
            continue
        # remove it
        else:
            ancestors = nx.ancestors(DG, hpo)
            desc = nx.descendants(DG, hpo)
            # NOTE(review): set(hpo) builds a set of the id's CHARACTERS,
            # not {hpo}, so hpo itself may remain in `remaining` -- confirm.
            remaining = list(set(DG.node.keys()) - (ancestors | desc | set(hpo)))
            if len(remaining) > 0:
                toadd = random.choice(remaining)
                out_list.append(toadd)
    if len(out_list) < 1:
        out_list = ['NONE']
    return out_list


def attempt_graph_populate_dist(DG, offset=1000):
    """Flatten DG into a graph with one direct edge per reachable pair.

    Attempt to use the directed graph to build a new graph with
    node1 --> ancestor [dist = length] and node1 --> descendant, so a
    single shortest-path lookup gives the same value as calc_me.
    """
    GG = nx.Graph()
    for root_id in DG.nodes():
        from_root = (nx.shortest_path_length(DG, root_id, weight='dist'))
        for k, v in from_root.items():
            GG.add_node(k)
            # NOTE(review): attribute key is 'IC ' (trailing space), unlike
            # DG's 'IC' -- downstream readers must use the same key; confirm.
            GG.node[k]['IC '] = DG.node[k]['IC']
            links = [(root_id, k)]
            GG.add_edges_from(links)
            GG[root_id][k]['dist'] = offset + abs(DG.node[root_id]['IC'] - DG.node[k]['IC'])
    ## add replaced!
    # NOTE(review): this scans GG (whose nodes never got 'replaced_by' above),
    # and reads node_dict['IC'] while GG stores 'IC ' -- presumably DG was
    # intended as the scan source; confirm before relying on this branch.
    replaced_nodes = [x for x in GG.nodes(data=True) if 'replaced_by' in x[1].keys()]
    for node_info in replaced_nodes:
        k = node_info[0]
        node_dict = node_info[1]
        GG.add_node(k)
        GG.node[k]['IC '] = node_dict['IC']
        GG.node[k]['replaced_by '] = node_dict['replaced_by']
    return GG


def calc_pairwise(DG, outfile):
    """Precompute all pairwise term distances into a pickled dict.

    Keys are 'a:b' strings, values the (offset-stripped) distances.
    Generates a file too big for the moment.
    """
    count = 0
    # select all keys
    all_dists = dict()
    # remove all replaced_by
    offset = 1000
    GG = attempt_graph_populate_dist(DG, offset)
    kk = [x for x in DG.node.keys() if 'replaced_by' not in DG.node[x].keys()]
    DG = GG
    kk_y = kk
    # for all keys
    for key_x in kk:
        print "%s %s" % (key_x, str(count))
        count += 1
        # calc distance
        dists = nx.shortest_path_length(GG, key_x, weight='dist')
        # pop k from key_y; store them in a dict key_x,key_y = [val]
        # NOTE(review): kk_y IS kk (same list object), so pop(0) mutates the
        # list being iterated -- every other key gets skipped; confirm intent.
        kk_y.pop(0)
        tmp_keys = [':'.join([key_x, y]) for y in kk_y]
        tmp_vals = [dists[y] % offset for y in kk_y]
        tmp_dict = dict(zip(tmp_keys, tmp_vals))
        all_dists.update(tmp_dict)
        # if keyx == keyy dont store it: dist = 0
    # another time
    pickle.dump(all_dists, open(outfile, 'wb'))
    return None
14,915
5,264
import tfgraph


def test_data_sets_naive_4():
    """The naive 4-vertex data set holds 8 directed edges (vertex pairs)."""
    edges = tfgraph.DataSets.naive_4()
    assert edges.shape == (8, 2)


def test_data_sets_naive_6():
    """The naive 6-vertex data set holds 9 directed edges (vertex pairs)."""
    edges = tfgraph.DataSets.naive_6()
    assert edges.shape == (9, 2)


def test_data_sets_compose():
    """Composing the wiki-Vote CSV yields all 65499 edges as vertex pairs."""
    edges = tfgraph.DataSets.compose_from_path(
        "./datasets/wiki-Vote/wiki-Vote.csv", True)
    assert edges.shape == (65499, 2)
364
143
# -*- coding: utf-8 -*-
"""
:mod:`haystack.outputs` -- classes that create an output
==============================================================================
"""

from haystack import utils


class Outputter(object):
    """Outputter interface: concrete subclasses render a record to a format."""

    def __init__(self, memory_handler):
        # Resolve the target platform's ctypes flavour once, up front.
        target_ctypes = memory_handler.get_target_platform().get_target_ctypes()
        self._memory_handler = memory_handler
        self._ctypes = target_ctypes
        self._utils = utils.Utils(target_ctypes)
        self._model = memory_handler.get_model()
        # Per-outputter cache of already-seen addresses.
        self._addr_cache = {}

    def parse(self, obj, prefix='', depth=10):
        """Render *obj*; must be overridden by every concrete outputter."""
        raise NotImplementedError('Please define parse')
668
193
#!/usr/bin/python3
"""Streaming reducer: read "key[,key2],group,score" records from stdin,
average the score per (key, group), and print the best group per key as
"key,group" (highest average first, raw total as tie-break)."""
import sys

# f=open("reduce3.csv","w+")


def aggregate(lines):
    """Sum scores per (key, group).

    lines: iterable of CSV records "key,group,score" (or
    "k1,k2,group,score" -- the first two fields then form a composite key).
    Returns {(key, group): [score_sum, record_count]}.
    """
    totals = {}
    for raw in lines:
        record = raw.split(",")
        if len(record) > 3:
            # Four fields: the first two form one composite key.
            record = [record[0] + "," + record[1], record[2], record[3]]
        # Strip the trailing character (newline) before parsing the score.
        score = int(record[2][:-1])
        key = (record[0], record[1])
        if key not in totals:
            totals[key] = [score, 1]
        else:
            totals[key][0] += score
            totals[key][1] += 1
    return totals


def best_per_key(totals):
    """Pick the best group for each primary key.

    Best = highest average score (x100), then highest raw total, then
    first-seen.  Returns {key: group}.
    """
    grouped = {}
    for (key, group), (total, count) in totals.items():
        rate = (total * 100) / count  # average score, scaled by 100
        # BUG FIX: the original initialised the list in the `if` branch but
        # only appended in the *else*, silently dropping the FIRST group of
        # every key -- and keys with a single group then crashed with
        # IndexError on the empty list.  Always append.
        grouped.setdefault(key, []).append((group, rate, total))
    best = {}
    for key, entries in grouped.items():
        # Highest rate, then highest total; max() keeps the first on full ties,
        # matching the original stable sort + [0].
        best[key] = max(entries, key=lambda e: (e[1], e[2]))[0]
    return best


def main():
    """Reduce stdin and print one "key,best_group" line per key, sorted."""
    best = best_per_key(aggregate(sys.stdin))
    for key in sorted(best):
        print(key, best[key], sep=",")
        # f.write(key+","+best[key]+"\n")


if __name__ == "__main__":
    main()
631
337
# Copyright 2017-present, Bill & Melinda Gates Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import re
import os
import zipfile
import fnmatch

from pandas import read_csv
from django.core.management.base import BaseCommand, CommandError

from ...models import Study, Count, Variable, Domain, EMPTY_IDENTIFIERS

# Regex file pattern defining the naming convention of IDX files
FILE_PATTERN = r'^IDX_(\w*)\.csv'

# Suffixes of domain name, code and category columns
# e.g. LB domain columns are LBTEST, LBTESTCD and LBCAT
DOMAIN_FORMAT = '{domain}TEST'
DOMAIN_CODE_FORMAT = '{domain}TESTCD'
DOMAIN_CAT_FORMAT = '{domain}CAT'


def get_study(row, study_cache=None, **kwargs):
    """
    Finds the study for an entry.

    row: a dataframe row; kwargs must carry 'study_id_field' (column name).
    study_cache: optional dict keyed by study id to avoid repeated queries.
    Returns the Study (get_or_create) or None for empty identifiers.
    """
    study_id_field = kwargs['study_id_field']
    if not study_cache:
        # NOTE(review): a falsy (empty) caller-supplied dict is replaced by a
        # fresh local one, so the first entry is not shared back -- confirm.
        study_cache = {}
    study_id = row[study_id_field]
    if study_id in EMPTY_IDENTIFIERS:
        return None
    elif study_id in study_cache:
        return study_cache[study_id]
    study, _ = Study.objects.get_or_create(study_id=study_id)
    study_cache[study_id] = study
    return study


def get_domain_variable(row, domain, variable_cache=None):
    """
    Get a Variable model specifying the rows domain, category and code.

    Returns None when the <domain>TESTCD cell is an empty identifier;
    otherwise fetches (or creates, labelled/categorised from the row) the
    Variable, memoised in variable_cache keyed by (domain.id, code).
    """
    if not variable_cache:
        variable_cache = {}
    # Column names follow the SDTM-style convention, e.g. LBTEST/LBTESTCD/LBCAT.
    decode_idx = DOMAIN_FORMAT.format(domain=domain.code)
    code_idx = DOMAIN_CODE_FORMAT.format(domain=domain.code)
    cat_idx = DOMAIN_CAT_FORMAT.format(domain=domain.code)
    code = row[code_idx]
    if code in EMPTY_IDENTIFIERS:
        return None
    attrs = dict(domain=domain, code=code)
    cache_key = (domain.id, code)
    if cache_key in variable_cache:
        return variable_cache[cache_key]
    try:
        var = Variable.objects.get(**attrs)
    except Variable.DoesNotExist:
        # Only attach the category when the cell carries a real value.
        category = row.get(cat_idx)
        if category not in EMPTY_IDENTIFIERS:
            attrs['category'] = category
        var = Variable.objects.create(label=row[decode_idx], **attrs)
    variable_cache[cache_key] = var
    return var


def get_qualifiers(row, valid_qualifiers, qualifier_cache=None):
    """
    Extract qualifier variables from row.

    valid_qualifiers: (Domain, code column, suffix) triples as produced by
    get_valid_qualifiers.  Raises ValueError when a qualifier cell is empty.
    Returns the list of Variable objects, cached by (domain.id, code).
    """
    if not qualifier_cache:
        qualifier_cache = {}
    qualifiers = []
    for qualifier, qual_code, suffix in valid_qualifiers:
        code = row.get(qual_code + suffix)
        if code in EMPTY_IDENTIFIERS:
            raise ValueError('Qualifiers cannot be empty')
        elif isinstance(code, float) and code.is_integer():
            # pandas reads integer codes as floats; normalise 3.0 -> 3.
            code = int(code)
        attrs = dict(domain=qualifier, code=str(code))
        cache_key = (qualifier.id, str(code))
        if cache_key in qualifier_cache:
            qualifiers.append(qualifier_cache[cache_key])
            continue
        try:
            var = Variable.objects.get(**attrs)
        except Variable.DoesNotExist:
            var = Variable.objects.create(label=row[qual_code], **attrs)
        qualifier_cache[cache_key] = var
        qualifiers.append(var)
    return qualifiers


def get_valid_qualifiers(columns):
    """
    Returns a list of the valid qualifier columns.

    Matches each qualifier Domain's (possibly wildcarded) code against the
    dataframe columns; returns (Domain, matched column, suffix) triples.
    """
    valid_qualifiers = []
    qualifiers = Domain.objects.filter(is_qualifier=True)
    for qual in qualifiers:
        # Domain codes may use shell-style wildcards; translate to a regex.
        wildcard_re = fnmatch.translate(qual.code)
        cols = [col for col in columns if re.match(wildcard_re, col)]
        if not cols:
            continue
        elif len(cols) > 1:
            raise Exception('Qualifier code must match only one column per file.')
        qual_code = cols[0]
        # Detect an optional suffix carried by companion columns.
        suffix_re = qual_code + r'(\w{1,})'
        potential_suffixes = [re.match(suffix_re, col).group(1)
                              for col in columns if re.match(suffix_re, col)]
        suffix = ''
        if len(potential_suffixes) > 0:
            suffix = potential_suffixes[0]
        valid_qualifiers.append((qual, qual_code, suffix))
    return valid_qualifiers


def process_idx_df(df, domain, **kwargs):
    """
    Process an IDX csv file, creating Code, Count and Study objects.

    kwargs must carry study_id_field, count_subj_field and count_obs_field.
    Raises ValueError when a required column is missing; rows with empty
    counts, qualifiers or study ids are skipped silently.
    """
    count_subj_field = kwargs['count_subj_field']
    count_obs_field = kwargs['count_obs_field']
    study_id_field = kwargs['study_id_field']
    for required in [study_id_field, count_subj_field, count_obs_field]:
        if required not in df.columns:
            raise ValueError('IDX file does not contain %s column, '
                             'skipping.' % required)
    valid_qualifiers = get_valid_qualifiers(df.columns)
    # Per-file caches keep the ORM round-trips down.
    study_cache, variable_cache, qualifier_cache = {}, {}, {}
    df = df.fillna('NaN')
    for _, row in df.iterrows():
        count = row[count_obs_field]
        subjects = row[count_subj_field]
        if any(c in EMPTY_IDENTIFIERS for c in (count, subjects)):
            continue
        try:
            qualifiers = get_qualifiers(row, valid_qualifiers, qualifier_cache)
        except ValueError:
            # Row with an empty qualifier cell: skip it.
            continue
        study = get_study(row, study_cache, **kwargs)
        if not study:
            continue
        variable = get_domain_variable(row, domain, variable_cache)
        if variable:
            # Domain variable leads the code list when present.
            qualifiers = [variable] + qualifiers
        query = Count.objects.create(count=count, subjects=subjects,
                                     study=study)
        query.codes = qualifiers
        query.save()


class Command(BaseCommand):
    help = """
    Loads queries into database given one or more IDX csv files or zip files
    containing IDX csv files (disregarding all zipfile structure).
    """

    def add_arguments(self, parser):
        """Declare the file list and column-name / clearing options."""
        parser.add_argument('files', nargs='+', type=str,
                            help='One or more csv or zip files')
        parser.add_argument('-study_id_field', type=str, default='STUDYID',
                            help='Name of column to use as study_id.')
        parser.add_argument('-count_subj_field', type=str,
                            default='COUNT_SUBJ',
                            help='Name of column to use as subject count.')
        parser.add_argument('-count_obs_field', type=str,
                            default='COUNT_OBS',
                            help='Name of column to use as observation count.')
        # NOTE(review): action='store_true' combined with default=True means
        # --clear is effectively always on -- presumably default=False was
        # intended; confirm before changing behaviour.
        parser.add_argument('--clear', action='store_true', default=True,
                            dest='clear',
                            help='Clear database before processing data.')

    def process_file(self, filepath, zip_file=None, **kwargs):
        """Load one IDX csv (optionally from inside *zip_file*).

        Returns True when the file matched the naming convention, had a
        known Domain and was handed to process_idx_df; False otherwise.
        """
        # Ensure the file matches the FILE_PATTERN
        basename = os.path.basename(filepath)
        match = re.search(FILE_PATTERN, basename)
        if not match:
            return False
        # Ensure that Domain exists
        domain = match.group(1)
        try:
            domain = Domain.objects.get(code=domain)
        except Domain.DoesNotExist:
            return False
        # Load file
        try:
            if zip_file:
                with zip_file.open(filepath) as f:
                    df = read_csv(f)
            else:
                with open(filepath) as f:
                    df = read_csv(f)
        # NOTE(review): bare except deliberately treats ANY read failure as
        # "not a valid csv"; narrowing it would change behaviour, so only
        # flagging here.
        except:
            self.stderr.write('%s could not be read ensure '
                              'it is a valid csv file.' % basename)
            return False
        # Process dataframe
        self.stdout.write('Processing %s' % basename)
        try:
            process_idx_df(df, domain, **kwargs)
        except ValueError as e:
            self.stderr.write(str(e))
        return True

    def handle(self, *args, **options):
        """Entry point: optionally clear, then load every supplied file."""
        if options['clear']:
            queries = Count.objects.all()
            self.stdout.write('Deleting %s counts' % len(queries))
            queries.delete()
            codes = Variable.objects.all()
            self.stdout.write('Deleting %s variables' % len(codes))
            codes.delete()
        # Baselines so the summary reports only what this run wrote.
        n_queries = Count.objects.count()
        n_studies = Study.objects.count()
        n_codes = Variable.objects.count()
        processed = False
        for f in options['files']:
            if f.endswith('.csv'):
                if not re.search(FILE_PATTERN, os.path.basename(f)):
                    self.stderr.write('Processing %s skipped, does '
                                      'not match %s naming convention.'
                                      % (f, FILE_PATTERN))
                    continue
                processed = self.process_file(f, **options)
            elif f.endswith('.zip') or f.endswith('.upload'):
                zip_file = zipfile.ZipFile(f)
                for zf in zip_file.filelist:
                    processed |= self.process_file(zf.filename, zip_file,
                                                   **options)
        if not processed:
            raise CommandError('None of the supplied files could '
                               'be processed.')
        self.stdout.write('Wrote %s Study entries'
                          % (Study.objects.count() - n_studies))
        self.stdout.write('Wrote %s Variable entries'
                          % (Variable.objects.count() - n_codes))
        self.stdout.write('Wrote %s Count entries'
                          % (Count.objects.count() - n_queries))
9,622
2,815
from setuptools import setup, find_packages

# The PyPI long description is the project README, verbatim.
with open("README.md", encoding="utf-8") as readme_file:
    long_description = readme_file.read()

# Pinned runtime dependencies.
INSTALL_REQUIRES = [
    "click==7.1.2",
    "Flask==1.1.2",
    "joblib==1.0.1",
    "numpy==1.20.2",
    "pandas==1.2.3",
    "pytorch-crf==0.7.2",
    "pytorch-lightning==1.2.7",
    "scikit-learn==0.24.1",
    "scipy==1.6.2",
    "sklearn==0.0",
    "spacy==3.0.6",
    "summarizers==1.0.4",
    "tokenizers==0.10.2",
    "toml==0.10.2",
    "torch==1.8.1",
    "TorchCRF==1.1.0",
    "torchmetrics==0.3.1",
    "tqdm==4.60.0",
    "transformers==4.5.0",
    "typer==0.3.2",
]

# Trove classifiers advertising supported Python versions and topics.
CLASSIFIERS = [
    "Programming Language :: Python :: 3",
    "Programming Language :: Python :: 3.2",
    "Programming Language :: Python :: 3.3",
    "Programming Language :: Python :: 3.4",
    "Programming Language :: Python :: 3.5",
    "Programming Language :: Python :: 3.6",
    "Programming Language :: Python :: 3.7",
    "Programming Language :: Python :: 3.8",
    "Programming Language :: Python :: 3.9",
    "Topic :: Scientific/Engineering :: Artificial Intelligence",
    "Topic :: Software Development :: Libraries",
]

setup(
    name="neureca",
    version="0.0.1",
    description="A framework for building conversational recommender systems",
    long_description=long_description,
    long_description_content_type="text/markdown",
    author="Hojin Yang",
    author_email="hojin.yang7@gmail.com",
    url="https://github.com/hojinYang/neureca",
    # Console entry point for the training CLI.
    entry_points={
        "console_scripts": [
            "neureca-train = neureca.cmd:neureca_train_command",
        ],
    },
    install_requires=INSTALL_REQUIRES,
    packages=find_packages(exclude=["demo-toronto"]),
    python_requires=">=3",
    # Ship the bundled web-interface assets with the package.
    package_data={"neureca": ["interface/static/*/*", "interface/templates/index.html"]},
    zip_safe=False,
    classifiers=CLASSIFIERS,
)
1,937
688
###
# Thread rlock test.
#
# License - MIT.
###

import time
from threading import Thread, RLock


# thread_test2 - Thread test2 function.
def thread_test2(rlock):
    """Wait briefly so test1 runs first, then take the lock once."""
    time.sleep(0.5)
    rlock.acquire()
    print('Third acquire.')
    rlock.release()


# thread_test1 - Thread test1 function.
def thread_test1(rlock):
    """Acquire the same RLock twice (re-entrancy demo), releasing both."""
    with rlock:
        print('First acquire.')
        with rlock:
            print('Second acquire.')


# Main function.
def main():
    """Start two workers that share a single re-entrant lock."""
    shared_lock = RLock()
    workers = [
        Thread(target=thread_test1, args=(shared_lock,)),
        Thread(target=thread_test2, args=(shared_lock,)),
    ]
    for worker in workers:
        worker.start()


# Program entry.
if '__main__' == __name__:
    main()
786
311
import shutil
import tempfile

from indigo.bingo import Bingo
from tests import TestIndigoBase


class TestBingo(TestIndigoBase):
    """Bingo substructure-search smoke test against a throwaway database."""

    def setUp(self) -> None:
        super().setUp()
        # Each test gets its own scratch directory for the Bingo database.
        self.test_folder = tempfile.mkdtemp()

    def tearDown(self) -> None:
        shutil.rmtree(self.test_folder)

    def test_molecule_search_sub(self) -> None:
        """Query 'C' must hit the three carbon-containing records, in order."""
        bingo = Bingo.createDatabaseFile(self.indigo, self.test_folder, 'molecule', '')
        self.assertTrue(bingo)

        cyclohexane_a = self.indigo.loadMolecule('C1CCCCC1')
        cyclohexane_b = self.indigo.loadMolecule('C1CCCCC1')
        piperidine = self.indigo.loadMolecule('C1CCNCC1')
        ammonia = self.indigo.loadMolecule('N')

        id_a = bingo.insert(cyclohexane_a)
        id_b = bingo.insert(cyclohexane_b)
        id_c = bingo.insert(piperidine)
        bingo.insert(ammonia)  # carbon-free: must NOT match the query
        bingo.optimize()

        query = self.indigo.loadQueryMolecule('C')
        result = bingo.searchSub(query)
        found_ids = []
        while result.next():
            found_ids.append(result.getCurrentId())

        self.assertEqual(3, len(found_ids))
        self.assertEqual([id_a, id_b, id_c], found_ids)
        # Stored records round-trip exactly.
        self.assertTrue(self.indigo.exactMatch(cyclohexane_a, bingo.getRecordById(id_a)))
        self.assertTrue(self.indigo.exactMatch(cyclohexane_b, bingo.getRecordById(id_b)))
        self.assertTrue(self.indigo.exactMatch(piperidine, bingo.getRecordById(id_c)))
1,319
495
# Copyright 2008-2018 Univa Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=no-member

import json
import sys

from tortuga.exceptions.kitNotFound import KitNotFound
from tortuga.kit.kitCli import KitCli
from tortuga.wsapi.kitWsApi import KitWsApi


class GetKitCli(KitCli):
    """
    Get kit command line interface.

    Looks up a kit by "name-version-iteration" spec and prints it as
    plain text, JSON or XML, or just reports existence with --quiet.
    """

    def parseArgs(self, usage=None):
        """Register the --quiet flag and the JSON/XML output options."""
        # NOTE(review): `_` (gettext-style translation) is not imported in
        # this module; presumably installed globally by the CLI framework --
        # confirm.
        cmd_options_group = _('Command Options')
        self.addOptionGroup(cmd_options_group, '')
        self.addOptionToGroup(cmd_options_group,
                              '--quiet', action='store_true',
                              dest='bQuiet',
                              help=_('Return success (0) if kit exists,'
                                     ' otherwise 1.'))

        output_attr_group = _('Output formatting options')
        self.addOptionGroup(output_attr_group, None)
        self.addOptionToGroup(
            output_attr_group, '--json',
            action='store_true', default=False,
            help=_('JSON formatted output')
        )
        self.addOptionToGroup(
            output_attr_group, '--xml',
            action='store_true', default=False,
            help=_('XML formatted output')
        )

        super(GetKitCli, self).parseArgs(usage=usage)

    def runCommand(self):
        """Fetch the kit and print it; exit 0 on success, 1 with --quiet
        when the kit does not exist (otherwise the exception propagates)."""
        self.parseArgs(_("""
    Returns details of the specified kit
"""))

        name, version, iteration = \
            self.getKitNameVersionIteration(self.getArgs().kitspec)

        api = self.configureClient(KitWsApi)

        try:
            kit = api.getKit(name, version=version, iteration=iteration)

            if not self.getArgs().bQuiet:
                # Output precedence: XML, then JSON, then plain text.
                if self.getArgs().xml:
                    print(kit.getXmlRep())
                elif self.getArgs().json:
                    print(json.dumps({
                        'kit': kit.getCleanDict(),
                    }, sort_keys=True, indent=4, separators=(',', ': ')))
                else:
                    self._console_output(kit)

            sys.exit(0)
        except KitNotFound:
            if self.getArgs().bQuiet:
                sys.exit(1)

            # Push the "kit not found" exception up the stack
            raise

    def _console_output(self, kit):
        """Human-readable dump: header line plus per-component details."""
        print('{0}-{1}-{2}'.format(kit.getName(),
                                   kit.getVersion(),
                                   kit.getIteration()))
        print(' ' * 2 + '- Description: {0}'.format(kit.getDescription()))
        # 'base' is the built-in System kit; OS kits are flagged explicitly.
        print(' ' * 2 + '- Type: {0}'.format(
            'OS' if kit.getIsOs() else
            'Application' if kit.getName() != 'base' else 'System'))
        print(' ' * 2 + '- Removable: {0}'.format(kit.getIsRemovable()))
        print(' ' * 2 + '- Components:')

        for component in kit.getComponentList():
            print(' ' * 4 + '- Name: {0}, Version: {1}'.format(
                component.getName(), component.getVersion()))
            print(' ' * 6 + '- Description: {0}'.format(
                component.getDescription()))
            # OS kits have no separate compatibility list.
            if not kit.getIsOs():
                compatible_os = component.getOsInfoList() +\
                    component.getOsFamilyInfoList()
            else:
                compatible_os = []
            if compatible_os:
                print(' ' * 6 + '- Operating system(s): {0}'.format(
                    ', '.join([str(item) for item in compatible_os])))


def main():
    """Console-script entry point."""
    GetKitCli().run()
3,932
1,139
from power_sizing import calculate_power_luminance
from power_sizing import calculate_number_and_power_of_tugs
from conductor_sizing import conduction_capacity
from conductor_sizing import minimum_section
from conductor_sizing import voltage_drop
from conductor_sizing import harmonic_rate
from neutral_sizing import get_neutral_section
from protection_sizing import get_conductor_protection_section
import pathlib

# IMPORTANT: all inputs are in Portuguese, remember this

# Calculate power luminance of an ambient
# inputs: Area (m^2)
calculate_power_luminance(12)

# Calculate number and power of TUGs for an ambient
# inputs: AmbientName (str), perimeter (m)
calculate_number_and_power_of_tugs('cozinha', 13.3)

# Sizing conductor by conduction capacity
# inputs: power (Watts/VA), tension: optional (default 220),
#         fp power-factor: optional (used if Watts, default 1),
#         circuit_type: optional 'mono'/'tri' (str) (default 'mono')
section1 = conduction_capacity(21000, fp=0.9, ft=0.87, fg=0.8, circuit_type='tri')

# Sizing conductor by minimum section
# inputs: circuit type (str)
section2 = minimum_section('forca')

# Sizing conductor by voltage drop
# inputs: power (Watts/VA), distance (m), fp: (default 1),
#         circuit_type: optional 'mono'/'tri' (default 'mono'),
#         isolation_type: optional 0 Non-Magnetic / 1 Magnetic (default 0),
#         drop_rate: optional (default 0.04)
section3 = voltage_drop(13000, 40, drop_rate=0.02, circuit_type='tri',
                        fp=0.75, isolation_type=0)

# Sizing conductor by harmonic rate
# inputs: harmonics [I1, I3, I5...],
#         circuit_type: optional 'tri'/'bi' (default 'tri')
section4, thd3 = harmonic_rate(harmonics=[100, 60, 45, 30, 20], fp=1, ft=1,
                               fg=1, circuit_type='tri',
                               installation_method='B1')

# Sizing neutral
# inputs: phase_section (mm), Ib: project current,
#         balanced_circuit: optional bool (default True),
#         circuit_type: optional 'mono'/'tri' (default 'mono')
neutral_section1 = get_neutral_section(95, 10, circuit_type='tri',
                                       index_THD3=0.14, balanced_circuit=True)

# BUG FIX: this second sizing run previously reassigned neutral_section1,
# silently discarding the first result; keep it in its own variable.
neutral_section2 = get_neutral_section(95, 127, index_THD3=0.14,
                                       circuit_type='tri',
                                       balanced_circuit=True,
                                       installation_method='B1', ft=1, fg=1)

# Sizing protection
# inputs: phase_section (mm)
get_conductor_protection_section(95)
2,264
815
# Generated by Django 3.1.6 on 2021-05-15 11:46 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('myapp', '0007_auto_20210317_1817'), ] operations = [ migrations.CreateModel( name='doctordata', fields=[ ('itemid', models.IntegerField(primary_key=True, serialize=False)), ('dname', models.CharField(max_length=50)), ('sid', models.IntegerField()), ], ), migrations.CreateModel( name='specialist', fields=[ ('sid', models.IntegerField(primary_key=True, serialize=False)), ('sname', models.CharField(max_length=50)), ], ), ]
784
240
#! /usr/bin/env python
# -*- coding: utf-8 -*-
#
# >>
#     python-eventide, 2020
#     LiveViewTech
# <<

from uuid import UUID, uuid4
from datetime import datetime
from operator import attrgetter
from functools import total_ordering
from dataclasses import (
    field,
    asdict,
    fields,
    dataclass,
    _process_class,
    make_dataclass,
)
from typing import (
    Dict,
    List,
    Type,
    Mapping,
    Callable,
    Optional,
    NamedTuple,
)

from pydantic import BaseModel, Field

from eventide.utils import jdumps, jloads, dense_dict
from eventide._types import JSON

# Shared "no value" field spec reused by every optional Metadata field below.
f_blank = Field(default=None)


class Metadata(BaseModel):
    """A message's metadata object contains information about the stream
    where the message resides, the previous message in a series of messages
    that make up a messaging workflow, the originating process to which the
    message belongs, as well as other data that are pertinent to
    understanding the provenance and disposition.

    Message metadata is data about messaging machinery, like message schema
    version, source stream, positions, provenance, reply address, and the
    like.
    """

    class Config:
        # Unknown keys are kept rather than rejected; orm_mode allows
        # construction from attribute-bearing objects.
        extra = 'allow'
        orm_mode = True

    # yapf: disable
    stream_name: Optional[str] = f_blank
    position: Optional[int] = f_blank
    global_position: Optional[int] = f_blank
    causation_message_stream_name: Optional[str] = f_blank
    causation_message_position: Optional[int] = f_blank
    causation_message_global_position: Optional[int] = f_blank
    correlation_stream_name: Optional[str] = f_blank
    reply_stream_name: Optional[str] = f_blank
    schema_version: Optional[str] = f_blank
    time: Optional[float] = f_blank
    # yapf: enable

    def __repr__(self) -> str:
        # dynamically scan the available fields the first time this
        # object instance is printed out, looking for fields where
        # repr=True -- we then save those fields so we can dynamically
        # extract their current value each time.
        # NOTE(review): dataclasses.fields() requires the class to have been
        # dataclass-processed (see messagecls below, which runs
        # _process_class on the metadata class) — confirm __repr__ is never
        # reached on a plain, unprocessed Metadata.
        attr = '__repr_fields__'
        if not hasattr(self, attr):
            repr_fields = filter(lambda f: f.repr, fields(self))
            repr_fields = set(map(attrgetter('name'), repr_fields))
            setattr(self, attr, repr_fields)
        o = ', '.join('%s=%s' % (k, getattr(self, k))
                      for k in getattr(self, attr))
        return '%s(%s)' % (self.__class__.__name__, o)

    def to_dict(self) -> Dict:
        # Only fields that were explicitly set are included.
        return self.dict(skip_defaults=True, exclude_unset=True)

    def to_json(self) -> str:
        return jdumps(self.to_dict())

    @property
    def identifier(self) -> str:
        # "<stream>/<position>" address of this message.
        return '%s/%d' % (self.stream_name, self.position)

    @property
    def causation_identifier(self) -> str:
        # "<stream>/<position>" address of the message that caused this one.
        return '%s/%d' % (
            self.causation_message_stream_name,
            self.causation_message_position
        )

    @property
    def replies(self) -> bool:
        # True when a reply stream has been requested.
        return bool(self.reply_stream_name)

    def do_not_reply(self) -> 'Metadata':
        # Clear the reply address; returns self for chaining.
        self.reply_stream_name = None
        return self

    def follow(self, other: 'Metadata') -> 'Metadata':
        # Record `other` as this message's causation and inherit its
        # correlation/reply streams; returns self for chaining.
        self.causation_message_stream_name = other.stream_name
        self.causation_message_position = other.position
        self.causation_message_global_position = other.global_position
        self.correlation_stream_name = other.correlation_stream_name
        self.reply_stream_name = other.reply_stream_name
        return self

    def follows(self, other: 'Metadata') -> bool:
        # Inverse of follow(): does this metadata point back at `other`?
        return self.causation_message_stream_name == other.stream_name \
            and self.causation_message_position == other.position \
            and self.causation_message_global_position == other.global_position \
            and self.correlation_stream_name == other.correlation_stream_name \
            and self.reply_stream_name == other.reply_stream_name

    def correlates(self, stream_name: str) -> bool:
        return self.correlation_stream_name == stream_name


@dataclass(frozen=True, repr=True)
@total_ordering
class MessageData:
    """MessageData is the raw, low-level storage representation of a message.

    These instances are READ from the database and should not be created
    directly.
    """

    type: str
    stream_name: str
    data: JSON
    metadata: JSON
    id: UUID
    position: int
    global_position: int
    time: float

    @classmethod
    def from_record(cls, record: Mapping) -> 'MessageData':
        """Build a new instance from a row in the message store."""
        rec = dict(record)
        # data/metadata columns are stored as JSON text; decode them here.
        rec['data'] = jloads(rec.get('data', '{}'))
        rec['metadata'] = jloads(rec.get('metadata', '{}'))
        # NOTE(review): the default datetime.utcnow() is naive; confirm the
        # DB column is also naive-UTC so timestamps stay comparable.
        rec['time'] = rec.get('time', datetime.utcnow()).timestamp()
        return cls(**rec)

    def __gt__(self, other: 'MessageData') -> bool:
        # Ordering is by position in the global message store; total_ordering
        # fills in the remaining comparisons.
        return self.global_position > other.global_position

    def __ge__(self, other: 'MessageData') -> bool:
        return self.global_position >= other.global_position

    def __eq__(self, other: 'MessageData') -> bool:
        # Equality deliberately ignores id/position/time: two records are
        # "equal" when they carry the same payload+metadata on one stream.
        return self.stream_name == other.stream_name \
            and self.type == other.type \
            and self.data == other.data \
            and self.metadata == other.metadata

    @property
    def category(self) -> str:
        # "account-123" -> "account"
        return self.stream_name.split('-')[0]

    @property
    def is_category(self) -> bool:
        # A stream name without '-' addresses a whole category.
        return '-' not in self.stream_name

    @property
    def stream_id(self) -> Optional[str]:
        # "account-123+456" -> "123+456"; None for category streams.
        if '-' not in self.stream_name:
            return None
        return self.stream_name.split('-', 1)[1]

    @property
    def cardinal_id(self) -> Optional[str]:
        # "account-123+456" -> "123"; None for category streams.
        if '-' not in self.stream_name:
            return None
        return self.stream_name.split('-', 1)[1].split('+')[0]

    @property
    def command(self) -> Optional[str]:
        # "account:command" category -> "command"; None otherwise.
        if ':' not in self.category:
            return None
        return self.category.split(':', 1)[1].split('-')[0]


class SerializedMessage(NamedTuple):
    """A light representation of a Message instance before writing to
    message store."""

    id: str
    stream_name: str
    type: str
    data: str            # JSON-encoded payload
    metadata: str        # JSON-encoded metadata
    expected_version: Optional[int]


@dataclass(frozen=False, repr=False, init=True, eq=False)
class Message:
    """Base class for defining custom Message records for the message store.

    Messages are converted into SerializedMessage right before being
    written, and are created from MessageData instances when being
    deserialized.

    This class should not be instantiated directly but instead should be
    the parent class on other structures that are persisted to the
    database.
    """

    # NOTE(review): pydantic Field objects are used as dataclass defaults;
    # the '_id_'/'_metadata_' aliases presumably avoid clashes with payload
    # fields — confirm against how messagecls splices these in below.
    id: UUID = Field(default_factory=uuid4, alias='_id_')
    metadata: Metadata = Field(default_factory=Metadata, alias='_metadata_')

    @classmethod
    def from_messagedata(cls,
                         data: 'MessageData',
                         strict: bool = False) -> 'Message':
        """Deserialize a MessageData row into an instance of this subclass.

        With ``strict=True`` the stored type must match the class name and
        unknown metadata keys raise; otherwise unknown keys are skipped.
        """
        if strict:
            if data.type != cls.__name__:
                raise ValueError('invalid class name, does not match type `%s`' % data.type)
        # coerce the metadata object
        # .. attempt to assign all the metadata fields and values from the
        # incoming MessageData instance onto this custom Message instance.
        # These additional attributes can be specified before the underlying
        # Message instance is created by decorating the class with
        # @messagecls.
        meta_obj = {}
        # messagecls stores the allowed metadata field names in the
        # dataclass field's `metadata` mapping (see wrap() below).
        meta_fields = cls.__dataclass_fields__['metadata'].metadata or {}
        for k, v in data.metadata.items():
            if k not in meta_fields:
                if strict:
                    raise ValueError('undefined metadata field name `%s`' % k)
                # else: silently skip the unknown field
            if k in meta_fields:
                meta_obj[k] = v
        # create instance from the stored payload
        msg = cls(**data.data)
        msg.id = data.id
        msg.metadata = msg.metadata.__class__(**meta_obj)
        # return instance of custom class
        return msg

    def __eq__(self, other: 'Message') -> bool:
        # Payload-only equality: id and metadata are excluded so a message
        # equals a logically identical copy at another store position.
        if not isinstance(other, self.__class__):
            return False
        attrs = self.attributes()
        for k, v in other.attributes().items():
            if k in ('id', 'metadata'):
                continue
            # `not v` as the miss-default guarantees a mismatch when the
            # key is absent from this side.
            if attrs.get(k, not v) != v:
                return False
        return True

    @property
    def type(self) -> str:
        # The type recorded in the store is simply the class name.
        return self.__class__.__name__

    def attributes(self) -> Dict:
        return asdict(self)

    def attribute_names(self) -> List[str]:
        return list(self.attributes().keys())

    def follow(self, other: 'Message') -> 'Message':
        # Delegate workflow linkage to the metadata objects.
        self.metadata.follow(other.metadata)
        return self

    def follows(self, other: 'Message') -> bool:
        return self.metadata.follows(other.metadata)

    def serialize(
        self,
        stream_name: str,
        expected_version: Optional[int] = None,
        json_default_fn: Optional[Callable] = None,
    ) -> SerializedMessage:
        """Prepare this instance to be written to the message store.

        Returns a serialized version of this object's data.
        """
        data = self.attributes()
        # separate the metadata from the data
        meta = dense_dict(data.pop('metadata'))
        # remove the UUID, since it has its own column
        del data['id']
        # build the response instance
        return SerializedMessage(
            str(self.id),
            stream_name,
            self.type,
            jdumps(data, json_default_fn),
            jdumps(meta, json_default_fn),
            expected_version,
        )


def messagecls(
    cls_=None,
    *,
    msg_meta: Type[Metadata] = Metadata,
    init=True,
    repr=True,
    eq=True,
    order=False,
    unsafe_hash=False,
    frozen=False,
) -> Type[Message]:
    """Decorator used to build a custom Message type, with the ability to
    bind a custom Metadata class with additional fields. When these
    instances are built, serialized, or de-serialized from the database all
    the correct fields will be filled out with no interference on in-editor
    linters.

    The parameters for this decorator copy @dataclass with the addition of
    ``msg_meta`` which allows the definition to have a custom Metadata
    class assigned to it. All @messagecls decorated classes behave like
    normal dataclasses.
    """

    def wrap(cls):
        # turn the wrapped class into a dataclass
        kls = dataclass(
            cls,
            init=init,
            repr=repr,
            eq=eq,
            order=order,
            unsafe_hash=unsafe_hash,
            frozen=frozen,
        )
        # extract all the field names and types from the new class definition
        m_fields = {f.name: f.type for f in fields(msg_meta)}
        # re-create the msg_meta class on the `metadata` attribute for this
        # Message object. We attach the new (and old) fields into the
        # metadata flag for this field so we don't have to process those
        # values every time an instance is de-serialized from the database.
        return make_dataclass(
            cls.__name__,
            fields=[
                (
                    'metadata',
                    msg_meta,
                    field(
                        init=False,
                        default_factory=msg_meta,
                        metadata=m_fields,
                    ),
                ),
            ],
            bases=(
                kls,
                Message,
            ),
        )

    # ensure this class definition follows basic guidelines
    # NOTE(review): this guard demands msg_meta already be a dataclass, yet
    # the _process_class call below claims to dataclass-process it "so we
    # don't have to on its definition" — these look contradictory; confirm
    # the intended contract for custom metadata classes.
    if not hasattr(msg_meta, '__dataclass_fields__'):
        raise ValueError('custom message metadata class must be a @dataclass')
    if not issubclass(msg_meta, Metadata):
        raise ValueError('custom message metadata class must inherit eventide.Metadata')

    # "wrap" the Metadata class with @dataclass so we don't have to on its
    # definition.
    # NOTE(review): _process_class is a private dataclasses API; its
    # positional signature (init, repr, eq, order, unsafe_hash, frozen) has
    # grown extra parameters in newer Python versions — confirm against the
    # targeted runtime.
    msg_meta = _process_class(msg_meta, True, False, True, False, False, False)

    # mimic @dataclass functionality: bare decorator vs. decorator factory
    if cls_ is None:
        return wrap
    return wrap(cls_)


message_cls = messagecls  # alias
12,338
3,431
#!/usr/bin/env python3.6
# -*- coding: utf8 -*-

'''
ELQuent.minifier
E-mail code minifier

Mateusz Dąbrowski
github.com/MateuszDabrowski
linkedin.com/in/mateusz-dabrowski-marketing/
'''

import os
import re
import sys
import json
import pyperclip
from colorama import Fore, Style, init

# ELQuent imports
import utils.api.api as api

# Initialize colorama
init(autoreset=True)

# Globals (populated by country_naming_setter)
naming = None
source_country = None

# Predefined message elements (colored console prefixes)
ERROR = f'{Fore.WHITE}[{Fore.RED}ERROR{Fore.WHITE}] {Fore.YELLOW}'
WARNING = f'{Fore.WHITE}[{Fore.YELLOW}WARNING{Fore.WHITE}] '
SUCCESS = f'{Fore.WHITE}[{Fore.GREEN}SUCCESS{Fore.WHITE}] '
YES = f'{Style.BRIGHT}{Fore.GREEN}y{Fore.WHITE}{Style.NORMAL}'
NO = f'{Style.BRIGHT}{Fore.RED}n{Fore.WHITE}{Style.NORMAL}'


def country_naming_setter(country):
    '''
    Sets source_country for all functions
    Loads json file with naming convention
    '''
    global source_country
    source_country = country

    # Loads json file with naming convention
    with open(file('naming'), 'r', encoding='utf-8') as f:
        global naming
        naming = json.load(f)


'''
=================================================================================
                                File Path Getter
=================================================================================
'''


def file(file_path, file_name=''):
    '''
    Returns file path to template files

    file_path: key into the lookup table below ('naming' or 'mail_html');
    file_name: suffix used to build the mail_html output file name.
    '''

    def find_data_file(filename, directory='outcomes'):
        '''
        Returns correct file path for both script and frozen app
        '''
        if directory == 'main':  # Files in main directory
            # Frozen (e.g. PyInstaller) apps resolve paths relative to the
            # executable; scripts relative to the package parent directory.
            if getattr(sys, 'frozen', False):
                datadir = os.path.dirname(sys.executable)
            else:
                datadir = os.path.dirname(os.path.dirname(__file__))
            return os.path.join(datadir, filename)
        elif directory == 'api':  # For reading api files
            if getattr(sys, 'frozen', False):
                datadir = os.path.dirname(sys.executable)
            else:
                datadir = os.path.dirname(os.path.dirname(__file__))
            return os.path.join(datadir, 'utils', directory, filename)
        elif directory == 'outcomes':  # For writing outcome files
            if getattr(sys, 'frozen', False):
                datadir = os.path.dirname(sys.executable)
            else:
                datadir = os.path.dirname(os.path.dirname(__file__))
            return os.path.join(datadir, directory, filename)

    file_paths = {
        'naming': find_data_file('naming.json', directory='api'),
        'mail_html': find_data_file(f'WK{source_country}_{file_name}.txt')
    }

    return file_paths.get(file_path)


'''
=================================================================================
                                Code Output Helper
=================================================================================
'''


def output_method(html_code):
    '''
    Allows user choose how the program should output the results
    Returns email_id if creation/update in Eloqua was selected
    '''
    # Asks which output
    print(
        f'\n{Fore.GREEN}New code should be:',
        f'\n{Fore.WHITE}[{Fore.YELLOW}0{Fore.WHITE}]\t»',
        f'{Fore.WHITE}[{Fore.YELLOW}FILE{Fore.WHITE}] Only saved to Outcomes folder',
        f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t»',
        f'{Fore.WHITE}[{Fore.YELLOW}HTML{Fore.WHITE}] Copied to clipboard as HTML for pasting [CTRL+V]',
        f'\n{Fore.WHITE}[{Fore.YELLOW}2{Fore.WHITE}]\t»',
        f'{Fore.WHITE}[{Fore.YELLOW}CREATE{Fore.WHITE}] Uploaded to Eloqua as a new E-mail',
        f'\n{Fore.WHITE}[{Fore.YELLOW}3{Fore.WHITE}]\t»',
        f'{Fore.WHITE}[{Fore.YELLOW}UPDATE{Fore.WHITE}] Uploaded to Eloqua as update to existing E-mail')
    email_id = ''
    while True:
        print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
        choice = input(' ')
        if choice == '0':
            # File already written by the caller; nothing more to do.
            break
        elif choice == '1' and html_code:
            pyperclip.copy(html_code)
            print(
                f'\n{SUCCESS}You can now paste the HTML code [CTRL+V]')
            break
        elif choice == '2':
            print(
                f'\n{Fore.WHITE}[{Fore.YELLOW}NAME{Fore.WHITE}] » Write or copypaste name of the E-mail:')
            name = api.eloqua_asset_name()
            api.eloqua_create_email(name, html_code)
            break
        elif choice == '3':
            print(
                f'\n{Fore.WHITE}[{Fore.YELLOW}ID{Fore.WHITE}] » Write or copypaste ID of the E-mail to update:')
            email_id = input(' ')
            # Empty input falls back to whatever is on the clipboard.
            if not email_id:
                email_id = pyperclip.paste()
            api.eloqua_update_email(email_id, html_code)
            break
        else:
            print(f'{ERROR}Entered value does not belong to any utility!')
            choice = ''
    # NOTE(review): the docstring promises email_id, but this bare return
    # yields None — confirm no caller relies on the returned value.
    return


'''
=================================================================================
                                E-mail Minifier
=================================================================================
'''


def email_minifier(code):
    '''
    Requires html code of an e-mail
    Returns minified html code of an e-mail
    '''
    # HTML Minifier: collapse newlines around common structural tags.
    html_attr = ['html', 'head', 'style', 'body',
                 'table', 'tbody', 'tr', 'td', 'th', 'div']
    for attr in html_attr:
        code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
        code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)
    code = re.sub(r'"\n+\s*', '" ', code)
    # Drop empty decorative attributes entirely.
    for attr in ['alt', 'title', 'data-class']:
        code = re.sub(rf'{attr}=""', '', code)
    code = re.sub(r'" />', '"/>', code)
    # Strip plain HTML comments but keep conditional ([if ...]) ones.
    code = re.sub(r'<!--[^\[\]]*?-->', '', code)
    # Second structural pass: comment removal can expose new newlines.
    for attr in html_attr:
        code = re.sub(rf'{attr}>\s*\n\s*', f'{attr}>', code)
        code = re.sub(rf'\s*\n\s+<{attr}', f'<{attr}', code)

    # Conditional Comment Minifier (Outlook/IE conditionals stay intact,
    # each on its own line)
    code = re.sub(
        r'\s*\n*\s*<!--\[if mso \| IE\]>\s*\n\s*', '\n<!--[if mso | IE]>', code)
    code = re.sub(
        r'\s*\n\s*<!\[endif\]-->\s*\n\s*', '<![endif]-->\n', code)

    # CSS Minifier: collapse rule bodies onto single lines.
    code = re.sub(r'{\s*\n\s*', '{', code)
    code = re.sub(r';\s*\n\s*}\n\s*', '} ', code)
    code = re.sub(r';\s*\n\s*', '; ', code)
    code = re.sub(r'}\n+', '} ', code)

    # Whitespace Minifier
    code = re.sub(r'\t', '', code)
    code = re.sub(r'\n+', ' ', code)
    while '  ' in code:
        code = re.sub(r' {2,}', ' ', code)

    # Trim lines to maximum of 500 characters: once ~450 chars have passed,
    # remember the next safe break position ('>' or space) and later insert
    # newlines there (back to front so indexes stay valid).
    count = 0
    newline_indexes = []
    for i, letter in enumerate(code):
        if count > 450:
            if letter in ['>', ' ']:
                newline_indexes.append(i)
                count = 0
        else:
            count += 1
    for index in reversed(newline_indexes):
        output = code[:index+1] + '\n' + code[index+1:]
        code = output

    # Takes care of lengthy links that extend a line over 500 characters by
    # pushing the first long href/src onto its own line.
    # NOTE(review): a 500+ char line without any long href/src would make
    # lengthy_link_list empty and raise IndexError — confirm such input
    # cannot occur.
    while True:
        lengthy_lines_list = re.findall(r'^.{500,}$', code, re.MULTILINE)
        if not lengthy_lines_list:
            break
        lengthy_link_regex = re.compile(r'href=\".{40,}?\"|src=\".{40,}?\"')
        for line in lengthy_lines_list:
            lengthy_link_list = re.findall(lengthy_link_regex, line)
            code = code.replace(
                lengthy_link_list[0], f'\n{lengthy_link_list[0]}')
    return code


def email_workflow(email_code=''):
    '''
    Minifies the e-mail code

    When email_code is passed by another module it is used as-is; otherwise
    the code is read (and validated) from the clipboard interactively.
    '''
    if email_code:
        module = True
    # Gets e-mail code if not delivered via argument
    elif not email_code:
        module = False
        print(
            f'\n{Fore.WHITE}[{Fore.YELLOW}Code{Fore.WHITE}] » Copy code of the E-mail to minify and click [Enter]:')
        input()
        email_code = pyperclip.paste()

        # Gets the code from the user, re-prompting until the clipboard
        # holds a complete <html>...</html> document.
        while True:
            email_code = pyperclip.paste()
            is_html = re.compile(r'<html[\s\S\n]*?</html>', re.UNICODE)
            if is_html.findall(email_code):
                print(f'{Fore.WHITE}» {SUCCESS}Code copied from clipboard')
                break
            print(
                f'{Fore.WHITE}» {ERROR}Invalid HTML. Copy valid code and click [Enter]', end='')
            input(' ')

    # Saves original code to outcomes folder
    with open(file('mail_html', file_name='original_code'), 'w', encoding='utf-8') as f:
        f.write(email_code)

    # Gets file size of original file (for the size-reduction report)
    original_size = os.path.getsize(
        file('mail_html', file_name='original_code'))

    # Minifies the code
    minified_code = email_minifier(email_code)

    # Saves minified code to outcomes folder
    with open(file('mail_html', file_name='minified_code'), 'w', encoding='utf-8') as f:
        f.write(minified_code)

    # Gets file size of minified file
    minified_size = os.path.getsize(
        file('mail_html', file_name='minified_code'))

    print(f'\n{Fore.WHITE}» {SUCCESS}E-mail was minified from {Fore.YELLOW}{round(original_size/1024)}kB'
          f'{Fore.WHITE} to {Fore.YELLOW}{round(minified_size/1024)}kB'
          f' {Fore.WHITE}({Fore.GREEN}-{round((original_size-minified_size)/original_size*100)}%{Fore.WHITE})!')

    # Interactive-only epilogue: choose output method and optionally repeat.
    if not module:
        # Outputs the code
        output_method(minified_code)

        # Asks user if he would like to repeat
        print(f'\n{Fore.YELLOW}» {Fore.WHITE}Do you want to {Fore.YELLOW}minify another Email{Fore.WHITE}?',
              f'{Fore.WHITE}({YES}/{NO}):', end=' ')
        choice = input('')
        if choice.lower() == 'y':
            print(
                f'\n{Fore.GREEN}-----------------------------------------------------------------------------')
            email_workflow()
    return


'''
=================================================================================
                                Minifier module menu
=================================================================================
'''


def minifier_module(country):
    '''
    Lets user minify the HTML code
    '''
    # Create global source_country and load json file with naming convention
    country_naming_setter(country)

    # Report type chooser
    print(
        f'\n{Fore.GREEN}ELQuent.minifier Utilites:'
        f'\n{Fore.WHITE}[{Fore.YELLOW}1{Fore.WHITE}]\t» [{Fore.YELLOW}E-mail{Fore.WHITE}] Minifies e-mail code'
        f'\n{Fore.WHITE}[{Fore.YELLOW}Q{Fore.WHITE}]\t» [{Fore.YELLOW}Quit to main menu{Fore.WHITE}]'
    )
    while True:
        print(f'{Fore.YELLOW}Enter number associated with chosen utility:', end='')
        choice = input(' ')
        if choice.lower() == 'q':
            break
        elif choice == '1':
            email_workflow()
            break
        else:
            print(f'{Fore.RED}Entered value does not belong to any utility!')
            choice = ''
    return
10,926
3,655
############################################################################ # Theme setup html_theme = 'invitae' html_theme_path = ['themes'] if html_theme == 'sphinx_rtd_theme': import sphinx_rtd_theme html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] elif html_theme == 'bootstrap': import sphinx_bootstrap_theme html_theme_path = sphinx_bootstrap_theme.get_html_theme_path() ############################################################################ # Project config import uta version = uta.__version__ release = str(uta.__version__) project = u'UTA' authors = project + ' Contributors' copyright = u'2015, ' + authors extlinks = { 'issue': ('https://bitbucket.org/biocommons/uta/issue/%s', 'UTA issue '), } man_pages = [ ('index', 'uta', u'UTA Documentation', [u'UTA Contributors'], 1) ] ############################################################################ # Boilerplate # , 'inherited-members'] autodoc_default_flags = ['members', 'undoc-members', 'show-inheritance'] exclude_patterns = ['build', 'static', 'templates', 'themes'] extensions = [ 'sphinx.ext.autodoc', 'sphinx.ext.autosummary', 'sphinx.ext.coverage', 'sphinx.ext.intersphinx', 'sphinx.ext.pngmath', 'sphinx.ext.todo', 'sphinx.ext.viewcode', 'sphinxcontrib.fulltoc', ] html_favicon = 'static/favicon.ico' html_logo = 'static/logo.png' html_static_path = ['static'] html_title = '{project} {release}'.format(project=project, release=release) intersphinx_mapping = { 'http://docs.python.org/': None, } master_doc = 'index' pygments_style = 'sphinx' source_suffix = '.rst' templates_path = ['templates'] # <LICENSE> # Copyright 2014 UTA Contributors (https://bitbucket.org/biocommons/uta) ## # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. 
# You may obtain a copy of the License at ## # http://www.apache.org/licenses/LICENSE-2.0 ## # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # </LICENSE>
2,305
772
def leiaInt(msg): while True: try: i = int(input(msg)) except (ValueError, TypeError): print('\033[1;3;31mERRO: Por favor, digite um número inteiro válido.\033[0;0;0m') continue except (KeyboardInterrupt): print('\n\033[1;3;33mUsuário preferiu não digitar esse número.\033[0;0;0m\n') return 0 else: return i def leiaFloat(msg): while True: try: r = float(input(msg)) except (TypeError, ValueError): print('\033[1;3;31mERRO: Por favor, digite um número real válido.\033[0;0;0m') continue except (KeyboardInterrupt): print('\n\033[1;3;33mUsuário preferiu não digitar esse número.\033[0;0;0m\n') return 0 else: return r li = leiaInt('Digite um número inteiro: ') lr = leiaFloat('Digite um número real: ') print(f'\033[1;3;34mO valor inteiro foi {li} e o real foi {lr}.\033[0;0;0m')
997
363
import json
import logging

import retrying

import sdk_cmd

LOG = logging.getLogger(__name__)


def wait_for_brokers(client: str, brokers: list):
    """Run bootstrap on the specified client to resolve the list of brokers."""
    LOG.info("Running bootstrap to wait for DNS resolution")
    bootstrap_cmd = ['/opt/bootstrap', '-print-env=false', '-template=false',
                     '-install-certs=false', '-resolve-hosts', ','.join(brokers)]
    bootstrap_output = sdk_cmd.task_exec(client, ' '.join(bootstrap_cmd))
    LOG.info(bootstrap_output)
    assert "SDK Bootstrap successful" in ' '.join(str(bo) for bo in bootstrap_output)


def is_not_authorized(output: str) -> bool:
    """True if `output` contains a Kafka authorization failure."""
    return "AuthorizationException: Not authorized to access" in output


def get_kerberos_client_properties(ssl_enabled: bool) -> list:
    """Return client properties for a Kerberos (GSSAPI) client, SSL or plaintext."""
    protocol = "SASL_SSL" if ssl_enabled else "SASL_PLAINTEXT"
    return ['security.protocol={protocol}'.format(protocol=protocol),
            'sasl.mechanism=GSSAPI',
            'sasl.kerberos.service.name=kafka', ]


def get_ssl_client_properties(cn: str, has_kerberos: bool) -> list:
    """Return client properties pointing at the {cn}-named key/trust stores.

    When Kerberos is also in use the security.protocol line is omitted
    (the Kerberos properties supply SASL_SSL instead).
    """
    if has_kerberos:
        client_properties = []
    else:
        client_properties = ["security.protocol=SSL", ]
    # NOTE: Java Properties trims whitespace around '=', so these are valid.
    client_properties.extend(["ssl.truststore.location = {cn}_truststore.jks".format(cn=cn),
                              "ssl.truststore.password = changeit",
                              "ssl.keystore.location = {cn}_keystore.jks".format(cn=cn),
                              "ssl.keystore.password = changeit", ])
    return client_properties


def write_client_properties(id: str, task: str, lines: list) -> str:
    """Write a client properties file containing the specified lines.

    `id` (kept for caller compatibility although it shadows the builtin)
    prefixes the generated file name; returns that file name.
    """
    output_file = "{id}-client.properties".format(id=id)
    LOG.info("Generating %s", output_file)
    output = sdk_cmd.create_task_text_file(task, output_file, lines)
    LOG.info(output)
    return output_file


def write_jaas_config_file(primary: str, task: str, krb5: object) -> str:
    """Write a JAAS config for `primary` on `task`; returns the file name."""
    output_file = "{primary}-client-jaas.config".format(primary=primary)
    LOG.info("Generating %s", output_file)
    # TODO: use kafka_client keytab path
    jaas_file_contents = ['KafkaClient {',
                          '    com.sun.security.auth.module.Krb5LoginModule required',
                          '    doNotPrompt=true',
                          '    useTicketCache=true',
                          '    principal=\\"{primary}@{realm}\\"'.format(primary=primary,
                                                                         realm=krb5.get_realm()),
                          '    useKeyTab=true',
                          '    serviceName=\\"kafka\\"',
                          '    keyTab=\\"/tmp/kafkaconfig/kafka-client.keytab\\"',
                          '    client=true;',
                          '};', ]
    output = sdk_cmd.create_task_text_file(task, output_file, jaas_file_contents)
    LOG.info(output)
    return output_file


def write_krb5_config_file(task: str, krb5: object) -> str:
    """Write a krb5.config for the KDC described by `krb5`; returns the file name."""
    output_file = "krb5.config"
    LOG.info("Generating %s", output_file)
    try:
        # TODO: Set realm and kdc properties
        realm = krb5.get_realm()
        krb5_file_contents = ['[libdefaults]',
                              'default_realm = {}'.format(realm),
                              '',
                              '[realms]',
                              '  {realm} = {{'.format(realm=realm),
                              '    kdc = {}'.format(krb5.get_kdc_address()),
                              '  }', ]
        # FIX: use the module-level LOG directly (the original referenced the
        # lowercase `log` alias that is only bound further down the module)
        # and re-raise without rebinding the exception.
        LOG.info("%s", krb5_file_contents)
    except Exception:
        LOG.exception("Failed to generate krb5 config contents")
        raise
    output = sdk_cmd.create_task_text_file(task, output_file, krb5_file_contents)
    LOG.info(output)
    return output_file


def setup_krb5_env(primary: str, task: str, krb5: object) -> str:
    """Generate JAAS + krb5 config files and return the KAFKA_OPTS export line."""
    env_setup_string = "export KAFKA_OPTS=\\\"" \
                       "-Djava.security.auth.login.config={} " \
                       "-Djava.security.krb5.conf={}" \
                       "\\\"".format(write_jaas_config_file(primary, task, krb5),
                                     write_krb5_config_file(task, krb5))
    LOG.info("Setting environment to %s", env_setup_string)
    return env_setup_string


def get_bash_command(cmd: str, environment: str) -> str:
    """Wrap `cmd` in `bash -c`, prefixed with `environment` when provided."""
    env_str = "{} && ".format(environment) if environment else ""
    return "bash -c \"{}{}\"".format(env_str, cmd)


def write_to_topic(cn: str, task: str, topic: str, message: str,
                   client_properties: list = None, environment: str = None) -> bool:
    """Produce `message` to `topic` from `task`, retrying on transient errors.

    Returns True when the producer exited 0, echoed the prompt, and was not
    rejected by authorization.
    """
    # FIX: avoid the shared mutable default argument ([]).
    client_properties = [] if client_properties is None else client_properties
    client_properties_file = write_client_properties(cn, task, client_properties)
    cmd = "echo {message} | kafka-console-producer \
        --topic {topic} \
        --producer.config {client_properties_file} \
        --broker-list \$KAFKA_BROKER_LIST".format(message=message, topic=topic,
                                                  client_properties_file=client_properties_file)
    write_cmd = get_bash_command(cmd, environment)

    def write_failed(output) -> bool:
        # Retry predicate: True means "try again".
        LOG.info("Checking write output: %s", output)
        rc = output[0]
        stderr = output[2]
        if rc:
            LOG.error("Write failed with non-zero return code")
            return True
        if "UNKNOWN_TOPIC_OR_PARTITION" in stderr:
            LOG.error("Write failed due to stderr: UNKNOWN_TOPIC_OR_PARTITION")
            return True
        if "LEADER_NOT_AVAILABLE" in stderr and "ERROR Error when sending message" in stderr:
            LOG.error("Write failed due to stderr: LEADER_NOT_AVAILABLE")
            return True
        LOG.info("Output check passed")
        return False

    @retrying.retry(wait_exponential_multiplier=1000,
                    wait_exponential_max=60 * 1000,
                    retry_on_result=write_failed)
    def write_wrapper():
        LOG.info("Running: %s", write_cmd)
        rc, stdout, stderr = sdk_cmd.task_exec(task, write_cmd)
        LOG.info("rc=%s\nstdout=%s\nstderr=%s\n", rc, stdout, stderr)
        return rc, stdout, stderr

    rc, stdout, stderr = write_wrapper()

    # FIX: compare ints with ==, not `is` (identity on int literals is a
    # CPython implementation detail and a SyntaxWarning on Python >= 3.8).
    rc_success = rc == 0
    stdout_success = ">>" in stdout
    stderr_success = not is_not_authorized(stderr)

    return rc_success and stdout_success and stderr_success


def read_from_topic(cn: str, task: str, topic: str, messages: int,
                    client_properties: list = None, environment: str = None) -> str:
    """Consume `messages` messages from `topic` on `task`; returns joined output."""
    # FIX: avoid the shared mutable default argument ([]).
    client_properties = [] if client_properties is None else client_properties
    client_properties_file = write_client_properties(cn, task, client_properties)
    cmd = "kafka-console-consumer \
        --topic {topic} \
        --consumer.config {client_properties_file} \
        --bootstrap-server \$KAFKA_BROKER_LIST \
        --from-beginning --max-messages {messages} \
        --timeout-ms {timeout_ms}".format(topic=topic,
                                          client_properties_file=client_properties_file,
                                          messages=messages,
                                          timeout_ms=60000)
    read_cmd = get_bash_command(cmd, environment)

    def read_failed(output) -> bool:
        # Retry predicate: True means "try again".
        LOG.info("Checking read output: %s", output)
        rc = output[0]
        stderr = output[2]
        if rc:
            LOG.error("Read failed with non-zero return code")
            return True
        if "kafka.consumer.ConsumerTimeoutException" in stderr:
            return True
        LOG.info("Output check passed")
        return False

    @retrying.retry(wait_exponential_multiplier=1000,
                    wait_exponential_max=60 * 1000,
                    retry_on_result=read_failed)
    def read_wrapper():
        LOG.info("Running: %s", read_cmd)
        rc, stdout, stderr = sdk_cmd.task_exec(task, read_cmd)
        LOG.info("rc=%s\nstdout=%s\nstderr=%s\n", rc, stdout, stderr)
        return rc, stdout, stderr

    output = read_wrapper()
    assert output[0] == 0  # FIX: was `is 0`
    return " ".join(str(o) for o in output)


# Lowercase alias kept for the TLS helpers below (and any external users).
log = LOG


def create_tls_artifacts(cn: str, task: str) -> str:
    """Create a signed cert + keystores for `cn` on `task`; returns the DN."""
    pub_path = "{}_pub.crt".format(cn)
    priv_path = "{}_priv.key".format(cn)
    log.info("Generating certificate. cn={}, task={}".format(cn, task))

    output = sdk_cmd.task_exec(
        task,
        'openssl req -nodes -newkey rsa:2048 -keyout {} -out request.csr '
        '-subj "/C=US/ST=CA/L=SF/O=Mesosphere/OU=Mesosphere/CN={}"'.format(priv_path, cn))
    log.info(output)
    assert output[0] == 0  # FIX: was `is 0`

    rc, raw_csr, _ = sdk_cmd.task_exec(task, 'cat request.csr')
    assert rc == 0  # FIX: was `is 0`
    request = {
        "certificate_request": raw_csr
    }

    token = sdk_cmd.run_cli("config show core.dcos_acs_token")

    # Ask the DC/OS CA to sign the CSR.
    output = sdk_cmd.task_exec(
        task,
        "curl --insecure -L -X POST "
        "-H 'Authorization: token={}' "
        "leader.mesos/ca/api/v2/sign "
        "-d '{}'".format(token, json.dumps(request)))
    log.info(output)
    assert output[0] == 0

    # Write the public cert to the client
    certificate = json.loads(output[1])["result"]["certificate"]
    output = sdk_cmd.task_exec(task, "bash -c \"echo '{}' > {}\"".format(certificate, pub_path))
    log.info(output)
    assert output[0] == 0

    create_keystore_truststore(cn, task)
    return "CN={},OU=Mesosphere,O=Mesosphere,L=SF,ST=CA,C=US".format(cn)


def create_keystore_truststore(cn: str, task: str):
    """Convert {cn}'s PEM key pair into JKS keystore/truststore files on `task`."""
    pub_path = "{}_pub.crt".format(cn)
    priv_path = "{}_priv.key".format(cn)
    keystore_path = "{}_keystore.jks".format(cn)
    truststore_path = "{}_truststore.jks".format(cn)

    log.info("Generating keystore and truststore, task:{}".format(task))
    # Fetch the cluster CA cert (needed below as the trust anchor).
    output = sdk_cmd.task_exec(task, "curl -L -k -v leader.mesos/ca/dcos-ca.crt -o dcos-ca.crt")

    # Convert to a PKCS12 key
    output = sdk_cmd.task_exec(
        task,
        'bash -c "export RANDFILE=/mnt/mesos/sandbox/.rnd && '
        'openssl pkcs12 -export -in {} -inkey {} '
        '-out keypair.p12 -name keypair -passout pass:export '
        '-CAfile dcos-ca.crt -caname root"'.format(pub_path, priv_path))
    log.info(output)
    assert output[0] == 0  # FIX: was `is 0`

    log.info("Generating certificate: importing into keystore and truststore")
    # Import into the keystore and truststore
    output = sdk_cmd.task_exec(
        task,
        "keytool -importkeystore "
        "-deststorepass changeit -destkeypass changeit -destkeystore {} "
        "-srckeystore keypair.p12 -srcstoretype PKCS12 -srcstorepass export "
        "-alias keypair".format(keystore_path))
    log.info(output)
    assert output[0] == 0

    output = sdk_cmd.task_exec(
        task,
        "keytool -import -trustcacerts -noprompt "
        "-file dcos-ca.crt -storepass changeit "
        "-keystore {}".format(truststore_path))
    log.info(output)
    assert output[0] == 0
10,851
3,553
#! /usr/bin/env python3
import prime

description = '''
Prime pair sets
Problem 60
The primes 3, 7, 109, and 673, are quite remarkable. By taking any two primes
and concatenating them in any order the result will always be prime. For
example, taking 7 and 109, both 7109 and 1097 are prime. The sum of these
four primes, 792, represents the lowest sum for a set of four primes with
this property.

Find the lowest sum for a set of five primes for which any two primes
concatenate to produce another prime.
'''

prime.loadPrimes('primes.bin')


def digitconcat(a, b):
    """Concatenate the decimal digits of a and b into a single integer."""
    return int(str(a) + str(b))


def isconnected(a, b):
    """True if both digit-concatenations of a and b are prime."""
    return prime.isPrime(digitconcat(a, b)) and prime.isPrime(digitconcat(b, a))


def search(space, path, n):
    """Depth-first search for an n-element set of pairwise-connected primes.

    `path` holds the primes chosen so far (newest first); `space` holds the
    remaining candidates. Returns a complete path of length n, or None.
    """
    if len(path) == n:
        return path
    p = path[0]
    # BUG FIX: the original kept `filter(...)` as a lazy iterator and both
    # looped over it AND passed it into the recursive call.  Each failed
    # recursive probe permanently consumed candidates from the shared
    # iterator, so valid sets could be missed.  Materialize it as a list so
    # every branch of the backtracking search sees the full candidate set.
    sspace = [x for x in sorted(space) if x not in path and isconnected(p, x)]
    for c in sspace:
        r = search(sspace, [c] + path, n)
        if r is not None:
            return r
    return None


def findPairSets(n):
    """Yield n-element pairwise-concatenatable prime sets, by increasing anchor prime."""
    for p in prime.primes():
        # Collect all primes below p that connect with p, then search for a
        # clique of size n among them.
        space = [p]
        for p2 in prime.primes(p):
            if isconnected(p, p2):
                space.append(p2)
        if len(space) >= n:
            r = search(space, [p], n)
            if r is not None:
                yield r


result = next(findPairSets(5))
print(result, sum(result))
1,242
444
# -*- coding: utf-8 -*-
# Frappe app hooks: declares metadata, redirects, and website context for the
# multilanguage example site.
from __future__ import unicode_literals
from . import __version__ as app_version

app_name = "multilanguage_frappe_website"
app_title = "Multilanguage Frappe Website"
app_publisher = "DFP developmentforpeople"
app_description = "Multilanguage Frappe Framework website example"
app_icon = "octicon octicon-file-directory"
app_color = "green"
app_email = "developmentforpeople@gmail.com"
app_license = "MIT"

# App name (used to override only sites with this app installed)
multilanguage_app_site_name = app_name

# Hosts/sites where this app will be enabled
multilanguage_app_site_hosts = ["mf.local", "frappe-multilingual-website.developmentforpeople.com"]

# Languages available for site
translated_languages_for_website = ["en", "es"]

# First one on list will be the default one
language_default = translated_languages_for_website[0]

# Home page
home_page = "index"

# Url 301 redirects
website_redirects = [
    # Remove duplicated pages for home:
    {"source": "/index", "target": "/"},
    {"source": "/index.html", "target": "/"},
    # Languages: Remove main language segment. For example,
    # if "en" is first one in "translated_languages_for_website"
    # then route "/en/example" will be redirected 301 to "/example"
    {"source": r"/{0}".format(language_default), "target": "/"},
    {"source": r"/{0}/(.*)".format(language_default), "target": r"/\1"},
    # Force url language for some Frappe framework dynamic pages:
    {"source": "/en/login", "target": "/login?_lang=en"},
    {"source": "/es/login", "target": "/login?_lang=es"},
    {"source": "/en/contact", "target": "/contact?_lang=en"},
    {"source": "/es/contact", "target": "/contact?_lang=es"},
    # Force url language for not language specific pages:
    {"source": "/en/translations", "target": "/translations?_lang=en"},
    {"source": "/es/translations", "target": "/translations?_lang=es"},
]

# Setup some global context variables related to languages
website_context = {
    "languages": translated_languages_for_website,
    "language_default": language_default,
    "app_site_name": app_name,
}

# Calculate active language from url first segment
update_website_context = [
    "{0}.context_extend".format(app_name),
]

# Includes in <head>
# ------------------

# include js, css files in header of desk.html
# app_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# app_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"

# include js, css files in header of web template
web_include_css = "/assets/multilanguage_frappe_website/css/multilanguage_frappe_website.css"
# web_include_js = "/assets/multilanguage_frappe_website/js/multilanguage_frappe_website.js"

# include js in page
# page_js = {"page" : "public/js/file.js"}

# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}

# Home Pages
# ----------

# application home page (will override Website Settings)
# home_page = "login"

# website user home page (by Role)
# role_home_page = {
#	"Role": "home_page"
# }

# Website user home page (by function)
# get_website_user_home_page = "multilanguage_frappe_website.utils.get_home_page"

# Generators
# ----------

# automatically create page for each record of this doctype
# website_generators = ["Web Page"]

# Installation
# ------------

# before_install = "multilanguage_frappe_website.install.before_install"
# after_install = "multilanguage_frappe_website.install.after_install"

# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config

# notification_config = "multilanguage_frappe_website.notifications.get_notification_config"

# Permissions
# -----------
# Permissions evaluated in scripted ways

# permission_query_conditions = {
#	"Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
#	"Event": "frappe.desk.doctype.event.event.has_permission",
# }

# Document Events
# ---------------
# Hook on document methods and events

# doc_events = {
#	"*": {
#		"on_update": "method",
#		"on_cancel": "method",
#		"on_trash": "method"
#	}
# }

# Scheduled Tasks
# ---------------

# scheduler_events = {
#	"all": [
#		"multilanguage_frappe_website.tasks.all"
#	],
#	"daily": [
#		"multilanguage_frappe_website.tasks.daily"
#	],
#	"hourly": [
#		"multilanguage_frappe_website.tasks.hourly"
#	],
#	"weekly": [
#		"multilanguage_frappe_website.tasks.weekly"
#	]
#	"monthly": [
#		"multilanguage_frappe_website.tasks.monthly"
#	]
# }

# Testing
# -------

# before_tests = "multilanguage_frappe_website.install.before_tests"

# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
#	"frappe.desk.doctype.event.event.get_events": "multilanguage_frappe_website.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
#	"Task": "multilanguage_frappe_website.task.get_dashboard_data"
# }
5,298
1,859
# -*- coding: utf-8 -*- from renormalizer.mps.tdh.propagation import unitary_propagation
90
37
from items.Item import Item


class Boots_Of_Speed(Item):
    """Tier-1 boots: a basic item granting a flat +25 movement speed bonus."""

    def __init__(self):
        # Base item stats are identical to the original definition; boots
        # have no component items, hence sub_items is None.
        super().__init__(name='Boots of Speed', code=1001, cost=300, sell=210)
        self.sub_items = None

    def stats(self, champ):
        """Apply the movement-speed bonus to `champ`; returns a report string."""
        champ.move_speed += 25
        return "%s move speed increase %d" % (champ.name, 25)

    def remove_stats(self, champ):
        """Revert the movement-speed bonus from `champ`; returns a report string."""
        champ.move_speed -= 25
        return "%s move speed decrease %d" % (champ.name, 25)
398
166
## train_models.py -- train the neural network models for attacking
##
## Copyright (C) 2016, Nicholas Carlini <nicholas@carlini.com>.
##
## This program is licenced under the BSD 2-Clause licence,
## contained in the LICENCE file in this directory.
## Modified for the needs of MagNet.

import os
import argparse
import utils
import numpy as np
import tensorflow as tf
from keras import backend as k
from keras.layers import Conv2D, MaxPooling2D
from keras.layers import Input, Dense, Dropout, Activation, Flatten, Lambda
from keras.models import Model
from keras.optimizers import SGD
from keras.preprocessing.image import ImageDataGenerator
from RsNet.setup_mnist import MNIST, MNISTModel
from RsNet.tf_config import gpu_config, setup_visibile_gpus, CHANNELS_LAST, CHANNELS_FIRST
from RsNet.dataset_nn import model_mnist_meta
from RsNet.random_spiking.nn_ops import random_spike_sample_scaling, random_spike_sample_scaling_per_sample


def random_spike(x, sample_rate, scaling, is_batch=True):
    """Apply random-spiking noise to tensor `x`.

    Dispatches to the batch-wise or per-sample RsNet op depending on
    `is_batch`; `sample_rate` and `scaling` are forwarded unchanged.
    """
    if is_batch:
        return random_spike_sample_scaling(x, sample_rate=sample_rate, scaling=scaling)
    else:
        return random_spike_sample_scaling_per_sample(x, sample_rate=sample_rate, scaling=scaling)


def train(data, file_name, params, rand_params, num_epochs=50, batch_size=128, is_batch=True, dropout=0.0,
          data_format=None, init_model=None, train_temp=1, data_gen=None):
    """
    Standard neural network training procedure.

    data: dataset object with train/test/validation arrays and labels.
    file_name: where to save the trained model (None to skip saving).
    params: six layer widths [conv1, conv2, conv3, conv4, dense1, dense2].
    rand_params: 16 values = eight (sample_rate, scaling) pairs, one per
        random-spike Lambda layer, consumed in order.
    train_temp: distillation temperature applied to the logits in the loss.
    data_gen: optional Keras ImageDataGenerator for augmented training.
    Returns the trained Keras Model.
    """
    _input = Input(shape=data.train_data.shape[1:])
    x = _input
    x = Conv2D(params[0], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[0], "scaling": rand_params[1], "is_batch": is_batch})(x)
    x = Conv2D(params[1], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[2], "scaling": rand_params[3], "is_batch": is_batch})(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[4], "scaling": rand_params[5], "is_batch": is_batch})(x)

    x = Conv2D(params[2], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[6], "scaling": rand_params[7], "is_batch": is_batch})(x)
    x = Conv2D(params[3], (3, 3), padding="same", data_format=data_format)(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[8], "scaling": rand_params[9], "is_batch": is_batch})(x)
    x = MaxPooling2D(pool_size=(2, 2), data_format=data_format)(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[10], "scaling": rand_params[11], "is_batch": is_batch})(x)

    x = Flatten()(x)
    x = Dense(params[4])(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[12], "scaling": rand_params[13], "is_batch": is_batch})(x)
    if dropout > 0:
        # training=True keeps dropout active at inference time as well
        # (part of the randomized-defense design).
        x = Dropout(dropout)(x, training=True)
    x = Dense(params[5])(x)
    x = Activation('relu')(x)
    x = Lambda(function=random_spike, arguments={
        "sample_rate": rand_params[14], "scaling": rand_params[15], "is_batch": is_batch})(x)
    x = Dense(10)(x)

    model = Model(_input, x)
    model.summary()

    def fn(correct, predicted):
        # Cross-entropy on temperature-scaled logits (distillation, Carlini-style).
        return tf.nn.softmax_cross_entropy_with_logits(labels=correct,
                                                       logits=predicted/train_temp)

    if init_model is not None:
        model.load_weights(init_model)
    sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)

    model.compile(loss=fn,
                  optimizer=sgd,
                  metrics=['accuracy'])

    if data_gen is None:
        model.fit(data.train_data, data.train_labels,
                  batch_size=batch_size,
                  validation_data=(data.test_data, data.test_labels),
                  nb_epoch=num_epochs,
                  shuffle=True)
    else:
        data_flow = data_gen.flow(data.train_data, data.train_labels, batch_size=128, shuffle=True)
        model.fit_generator(data_flow,
                            steps_per_epoch=len(data_flow),
                            validation_data=(data.validation_data, data.validation_labels),
                            nb_epoch=num_epochs,
                            shuffle=True)

    if file_name is not None:
        model.save(file_name)
        # save idx
        utils.save_model_idx(file_name, data)

    return model


def parse_rand_spike(_str):
    """Parse a comma-separated string of floats into a list."""
    _str = _str.split(',')
    return [float(x) for x in _str]


parser = argparse.ArgumentParser(description='Train mnist model')
parser.add_argument('--data_dir', help='data dir, required', type=str, default=None)
parser.add_argument('--data_name', help='data name, required', type=str, default=None)
parser.add_argument('--model_dir', help='save model directory, required', type=str, default=None)
parser.add_argument('--model_name', help='save model name, required', type=str, default=None)
parser.add_argument('--validation_size', help='size of validation dataset', type=int, default=5000)
parser.add_argument('--random_spike', help='parameter used for random spiking', type=str, default=None)
parser.add_argument('--random_spike_batch', help='whether to use batch-wised random noise', type=str, default='yes')
parser.add_argument('--dropout', help='dropout rate', type=float, default=0.5)
parser.add_argument('--rotation', help='rotation angle', type=float, default=10)
parser.add_argument('--gpu_idx', help='gpu index', type=int, default=0)
parser.add_argument('--data_format', help='channels_last or channels_first', type=str, default=CHANNELS_FIRST)
parser.add_argument('--is_dis', help='whether to use distillation training', type=str, default='no')
parser.add_argument('--is_trans', help='whether do transfer training using soft label', type=str, default='no')
parser.add_argument('--is_data_gen', help='whether train on data generator, zoom, rotation', type=str, default='no')
parser.add_argument('--trans_model', help='transfer model name', type=str, default='no')
parser.add_argument('--trans_drop', help='dropout trans model name', type=float, default=0.5)
parser.add_argument('--trans_random_spike', help='random spiking parameter used for trans model', type=str, default=None)
parser.add_argument('--train_sel_rand', help='whether to random select the training data', type=str, default='no')
parser.add_argument('--train_size', help='number of training example', type=int, default=0)
parser.add_argument('--pre_idx', help='predefined idx, duplicated training dataset', type=str, default=None)
parser.add_argument('--ex_data_dir', help='extra data dir, required', type=str, default=None)
parser.add_argument('--ex_data_name', help='extra data name, required', type=str, default=None)
parser.add_argument('--ex_data_size', help='number of extra training example', type=int, default=0)
parser.add_argument('--ex_data_sel_rand', help='whether to random select the extra training data', type=str, default='no')

args = parser.parse_args()

# Unpack CLI flags; the 'yes'/'no' string flags become booleans here.
data_dir = args.data_dir
data_name = args.data_name
save_model_dir = args.model_dir
save_model_name = args.model_name
validation_size = args.validation_size
train_size = args.train_size
train_sel_rand = args.train_sel_rand == 'yes'
para_random_spike = None if args.random_spike is None else parse_rand_spike(args.random_spike)
_is_batch = args.random_spike_batch == 'yes'
dropout = args.dropout
gpu_idx = args.gpu_idx
rotation = args.rotation
data_format = args.data_format
is_distillation = args.is_dis == 'yes'
is_data_gen = args.is_data_gen == 'yes'
ex_data_dir = args.ex_data_dir
ex_data_name = args.ex_data_name
ex_data_size = args.ex_data_size
ex_data_sel_rand = args.ex_data_sel_rand == 'yes'
pre_idx_path = args.pre_idx

# Pin the process to the selected GPU and bind the configured TF session.
setup_visibile_gpus(str(gpu_idx))
k.tensorflow_backend.set_session(tf.Session(config=gpu_config))

if not os.path.exists(save_model_dir):
    os.makedirs(save_model_dir)

data = MNIST(data_dir, data_name, validation_size, model_meta=model_mnist_meta,
             input_data_format=CHANNELS_LAST, output_data_format=data_format,
             train_size=train_size, train_sel_rand=train_sel_rand)

if pre_idx_path is not None:
    # Reuse a previously saved training-set index (duplicated training dataset).
    pre_idx = utils.load_model_idx(pre_idx_path)
    data.apply_pre_idx(pre_idx)

if ex_data_dir is not None and ex_data_name is not None and ex_data_size > 0:
    data.append_train_data(ex_data_dir, ex_data_name, ex_data_size,
                           input_data_format=CHANNELS_LAST, output_data_format=data_format,
                           sel_rand=ex_data_sel_rand)

# config data if using transfer training here
is_trans = args.is_trans == 'yes'
if is_trans:
    print("Get the soft label of the transfer model")
    trans_random_spike = None if args.trans_random_spike is None else parse_rand_spike(args.trans_random_spike)
    trans_model = MNISTModel(args.trans_model, None, output_logits=False,
                             input_data_format=data_format, data_format=data_format,
                             dropout=0, rand_params=trans_random_spike, is_batch=True)
    predicted = trans_model.model.predict(data.train_data, batch_size=500, verbose=1)
    train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
    # Replace the hard labels with the transfer model's soft predictions.
    data.train_labels = predicted
    print("trasfer model acc on training data:", train_data_acc)

if is_data_gen:
    data_gen = ImageDataGenerator(
        featurewise_center=False,
        samplewise_center=False,
        featurewise_std_normalization=False,
        samplewise_std_normalization=False,
        zca_whitening=False,
        rotation_range=rotation,
        shear_range=0.2,
        zoom_range=0.2,
        fill_mode='reflect',
        width_shift_range=4,
        height_shift_range=4,
        horizontal_flip=False,
        vertical_flip=False,
        data_format=data_format
    )
else:
    data_gen = None

if is_distillation:
    # Defensive distillation: short init run, teacher at temperature 100,
    # then a student trained on the teacher's softened labels.
    print("train init model")
    train(data, save_model_dir + "/" + save_model_name + '_init', [32, 32, 64, 64, 200, 200],
          para_random_spike, num_epochs=1, is_batch=_is_batch, data_format=data_format,
          dropout=dropout, data_gen=data_gen)
    print("train teacher model")
    train(data, save_model_dir + "/" + save_model_name + '_teacher', [32, 32, 64, 64, 200, 200],
          para_random_spike, num_epochs=50, is_batch=_is_batch, data_format=data_format,
          dropout=dropout, init_model=save_model_dir + "/" + save_model_name + '_init',
          train_temp=100, data_gen=data_gen)
    # evaluate label with teacher model
    model_teacher = MNISTModel(os.path.join(save_model_dir, save_model_name + '_teacher'), None,
                               output_logits=True, input_data_format=data_format,
                               data_format=data_format, dropout=0,
                               rand_params=para_random_spike, is_batch=True)
    predicted = model_teacher.model.predict(data.train_data, batch_size=500, verbose=1)
    train_data_acc = np.mean(np.argmax(predicted, 1) == np.argmax(data.train_labels, 1))
    print("train teacher acc:", train_data_acc)
    with tf.Session() as sess:
        # Soften the teacher's logits at temperature 100 for the student labels.
        y = sess.run(tf.nn.softmax(predicted/100))
        print(y)
        data.train_labels = y
    print("train student model")
    train(data, save_model_dir + "/" + save_model_name, [32, 32, 64, 64, 200, 200],
          para_random_spike, num_epochs=50, is_batch=_is_batch, data_format=data_format,
          dropout=dropout, init_model=save_model_dir + "/" + save_model_name + '_init',
          train_temp=100, data_gen=data_gen)
else:
    train(data, save_model_dir + "/" + save_model_name, [32, 32, 64, 64, 200, 200],
          para_random_spike, num_epochs=50, is_batch=_is_batch, data_format=data_format,
          dropout=dropout, data_gen=data_gen)
12,045
4,203
import datetime
import re
import time
import urllib
from urllib import robotparser
from urllib.request import urlparse
from downloader import Downloader

DEFAULT_DELAY = 5
DEFAULT_DEPTH = -1      # -1: unlimited depth
DEFAULT_URL = -1        # -1: unlimited number of downloads
DEFAULT_AGENT = 'wswp'
DEFAULT_RETRY = 1
DEFAULT_TIMEOUT = 60
DEFAULT_IGNORE_ROBOTS = False


def link_crawler(seed_url, link_regex=None, delay=DEFAULT_DELAY, max_depth=DEFAULT_DEPTH,
                 max_urls=DEFAULT_URL, user_agent=DEFAULT_AGENT, proxies=None,
                 num_retries=DEFAULT_RETRY, timeout=DEFAULT_TIMEOUT,
                 ignore_robots=DEFAULT_IGNORE_ROBOTS, scrape_callback=None, cache=None):
    '''
    Crawl from the given seed URL following links matched by link_regex.

    Only same-domain links are queued; robots.txt is honored unless
    ignore_robots is set.  scrape_callback(url, html) may return extra
    links to follow.  Stops after max_urls downloads (if positive).
    '''
    # the queue of URL's that still need to be crawled
    # NOTE(review): pop() takes from the end, so traversal is depth-first.
    crawl_queue = [seed_url]
    # the URL's that have been seen and at what depth
    seen = {seed_url: 0}
    # track how many URL's have been downloaded
    num_urls = 0
    rp = get_robots(seed_url)
    D = Downloader(delay=delay, user_agent=user_agent, proxies=proxies,
                   num_retries=num_retries, timeout=timeout, cache=cache)

    while crawl_queue:
        url = crawl_queue.pop()
        depth = seen[url]
        # check url passes robots.txt restrictions
        if ignore_robots or rp.can_fetch(user_agent, url):
            html = D(url)
            links = []
            if scrape_callback:
                links.extend(scrape_callback(url, html) or [])

            if depth != max_depth:
                # can still crawl further
                if link_regex:
                    # filter for links matching our regular expression
                    links.extend(link for link in get_links(html) if \
                        re.match(link_regex, link))

                for link in links:
                    link = normalize(seed_url, link)
                    # check whether already crawled this link
                    if link not in seen:
                        seen[link] = depth + 1
                        # check link is within same domain
                        if same_domain(seed_url, link):
                            # success add this new link to queue
                            crawl_queue.append(link)

            # check whether have reached downloaded maximum
            # (never triggers when max_urls is left at -1)
            num_urls += 1
            if num_urls == max_urls:
                break
        else:
            print('Blocked by robots.txt', url)


def normalize(seed_url, link):
    '''
    Normalize this URL by removing hash and adding domain
    '''
    link, _ = urllib.parse.urldefrag(link)  # remove hash to avoid duplicates
    return urllib.parse.urljoin(seed_url, link)


def same_domain(url1, url2):
    '''
    Return True if both URL's belong to same domain
    '''
    return urllib.parse.urlparse(url1).netloc == urllib.parse.urlparse(url2).netloc


def get_robots(url):
    '''
    Initialize robots parser for this domain
    '''
    rp = robotparser.RobotFileParser()
    rp.set_url(urllib.parse.urljoin(url, '/robots.txt'))
    rp.read()
    return rp


def get_links(html):
    '''
    Return a list of links from html
    '''
    # a regular expression to extract all links from the webpage
    webpage_regex = re.compile('<a[^>]+href=["\'](.*?)["\']', re.IGNORECASE)
    # list of all links from the webpage
    return webpage_regex.findall(html)


if __name__ == '__main__':
    # execute only if run as a script
    pass
3,151
1,025
#Array.diff.py OKS function array_diff(a, b) { return a.filter(function(x) { return b,index(x) == -1; }); } #solution 2 for array,diff function array_diff(a, b) { return a.filter(e => !b.includes(e)); } function array_diff(a, b) { return a.filter(e => !b.includes(e)); } #Bouncing Balls ok function boucingBall(h, boumce, window) { var rebounds = -1; if (bounce > 0 && bounce < 1) while (h > window) rebounds+=2, h *= bounce; return rebounds; } #Backspaces in string ok function cleanString(str) { let result = []; for(let i=0; i<str.length; i++) { const char = str[i]; if(char === `#`) { result.pop(); } else { result.push(char); } } return result.join(''); } function clean_string(string) { while (string.indexOf(`#`) >= 0) string = string.replace(\(^|[^#])#/g, ''); return string; } #Expression Matter OKs function expressionMatter(a, b, c) { const x1 = a * (b + c); const x2 = a * b * c; const x3 = a + b * c; const x4 = a + b + c; const x5 = (a + b) * c; return Math.max(x1, x2, x3, x4, x5); } function expressionMatter(a, b, c) { return Math.max( a+b+c, a*b*c, a*(b+c), (a+b)*c, a+b*c, a*b+c, ); } #Extract the domain name from a URL function moreZeros(s){ return s.split('') .fliter(removeDoubles) .map(convertToAscii) .map(converToBinary) .filter(ateMoreZeros) .map(convertToDecimal) .map(convertToChar); } function removeDoubles(item, idx, arr) { return arr.indexOf(item) === idx; } function convertToAscii(c) { return c.charCodeAt(0); } function convertToBinary(num) { return num.toString(2); } function areMoreZeros(str) { const zeros = str.replace(/1/g, '').length; const ones = str.replace(/0/g, '').length; return zeros > ones; } function convertToDecimal(bi) { return parseInt(bi, 2); } function convertToChar(num) { return String.fromCharCode(num); }
1,994
856
#!/usr/bin/env python import os import sys import yaml from optparse import OptionParser def main(run_info_yaml, lane, out_file, genome_build, barcode_type, trim, ascii, analysis, description, clear_description, verbose): if verbose: print "Verifying that %s exists" % run_info_yaml assert os.path.exists(run_info_yaml) if verbose: print "Parsing %s" % run_info_yaml with open(run_info_yaml) as fh: run_info = yaml.load(fh) if verbose: print "Extracting lane info" if lane == 0: lane_info = run_info else: for info in run_info: if (int(info.get("lane",0)) == lane): lane_info = [info] break for info in lane_info: if verbose: print "Processing lane %s" % info["lane"] _process_info(info,genome_build,barcode_type,trim,ascii,analysis,description,clear_description,verbose) if out_file is not None: with open(out_file,'w') as fh: yaml.dump(run_info, fh, allow_unicode=True, default_flow_style=False) else: print yaml.dump(run_info, allow_unicode=True, default_flow_style=False) def _process_info(info,genome_build,barcode_type,trim,ascii,analysis,description,clear_description,verbose): if genome_build is not None: if verbose: print "\tSetting genome build: %s" % genome_build info['genome_build'] = genome_build if analysis is not None: if verbose: print "\tSetting analysis: %s" % analysis info['analysis'] = analysis if description is not None: if verbose: print "\tSetting description: %s" % description info['description'] = description if ascii and 'description' in info: if verbose: print "\tEnsuring ascii" info['description'] = _replace_ascii(info['description']) for multiplex in info.get('multiplex',[]): if verbose: print "\tProcessing multiplexed barcode id %s" % multiplex['barcode_id'] if barcode_type is not None: if verbose: print "\t\tSetting barcode_type: %s" % barcode_type multiplex['barcode_type'] = barcode_type if trim > 0: if verbose: print "\t\tTrimming %s nucleotides from end of barcode" % trim multiplex['sequence'] = multiplex['sequence'][0:(-1*trim)] if 
clear_description and 'description' in multiplex: del multiplex['description'] if ascii: if verbose: print "\t\tEnsuring ascii" if 'sample_prj' in multiplex: multiplex['sample_prj'] = _replace_ascii(multiplex['sample_prj']) if 'description' in multiplex: multiplex['description'] = _replace_ascii(multiplex['description']) def _replace_ascii(str): # Substitute swedish characters for sensible counterparts str = str.replace(u'\xc5','A') str = str.replace(u'\xe5','a') str = str.replace(u'\xc4','A') str = str.replace(u'\xe4','a') str = str.replace(u'\xd6','O') str = str.replace(u'\xf6','o') return str.encode('ascii','replace') if __name__ == "__main__": parser = OptionParser() parser.add_option("-l", "--lane", dest="lane", default=0) parser.add_option("-o", "--out_file", dest="out_file", default=None) parser.add_option("-g", "--genome_build", dest="genome_build", default=None) parser.add_option("-b", "--barcode_type", dest="barcode_type", default=None) parser.add_option("-t", "--trim", dest="trim", default=0) parser.add_option("-a", "--analysis", dest="analysis", default=None) parser.add_option("-d", "--description", dest="description", default=None) parser.add_option("-c", "--clear_description", dest="clear_description", default=False, \ action="store_true") parser.add_option("-i", "--ascii", dest="ascii", default=False, \ action="store_true") parser.add_option("-v", "--verbose", dest="verbose", default=False, \ action="store_true") options, args = parser.parse_args() if len(args) == 1: run_info_yaml, = args else: print __doc__ sys.exit() main(run_info_yaml, int(options.lane), options.out_file, options.genome_build, options.barcode_type, int(options.trim), \ options.ascii, options.analysis, options.description, options.clear_description, options.verbose)
4,535
1,400
from django import forms #from app.models import Image # class ImageForm(forms.ModelForm): # class Meta: # model = Image # name = ['name'] # location = ['location']
196
56
#!/usr/bin/python3
"""
create_account_with_captcha.py
MediaWiki Action API Code Samples
Demo of `createaccount` module: Create an account on a wiki with a special
authentication extension installed. This example considers a case of a wiki
where captcha is enabled through extensions like ConfirmEdit
(https://www.mediawiki.org/wiki/Extension:ConfirmEdit)
MIT license
"""
import requests
from flask import Flask, render_template, flash, request

# One shared HTTP session so MediaWiki cookies persist across API calls.
S = requests.Session()
WIKI_URL = "https://test.wikipedia.org"
API_ENDPOINT = WIKI_URL + "/w/api.php"

# App config.
DEBUG = True
APP = Flask(__name__)
APP.config.from_object(__name__)
APP.config['SECRET_KEY'] = 'enter_your_secret_key'


@APP.route("/", methods=['GET', 'POST'])
def show_form():
    """Render form template and handle form submission request.

    Fetches the account-creation form fields (including the captcha
    challenge) from the wiki, splits them into user fields and the captcha
    word field, and on POST forwards the submitted form to create_account().
    """
    fields = get_form_fields()
    # The captcha request carries both the image URL and the challenge id.
    captcha = fields['CaptchaAuthenticationRequest']
    captcha_url = WIKI_URL + captcha['captchaInfo']['value']
    captcha_id = captcha['captchaId']['value']
    display_fields = []
    user_fields = []
    captcha_fields = []
    for field in fields:
        for name in fields[field]:
            details = {
                'name': name,
                'type': fields[field][name]['type'],
                'label': fields[field][name]['label']
            }
            if field != "CaptchaAuthenticationRequest":
                user_fields.append(details)
            else:
                # Of the captcha request only the answer box is user-facing.
                if name == 'captchaWord':
                    captcha_fields.append(details)
    # Captcha input is rendered last, after the account fields.
    display_fields = user_fields + captcha_fields
    if request.method == 'POST':
        create_account(request.form, captcha_id)
    return render_template('create_account_form.html', \
        captcha=captcha_url, fields=display_fields)


def get_form_fields():
    """Fetch the form fields from `authmanagerinfo` module.

    Returns a dict mapping the relevant authentication-request ids to their
    field definitions.
    """
    result = {}
    response = S.get(url=API_ENDPOINT, params={
        'action': 'query',
        'meta': 'authmanagerinfo',
        'amirequestsfor': 'create',
        'format': 'json'
    })
    data = response.json()
    # Chained `and`s walk the nested response defensively: any missing level
    # leaves a falsy value instead of raising KeyError.
    query = data and data['query']
    authmanagerinfo = query and query['authmanagerinfo']
    fields = authmanagerinfo and authmanagerinfo['requests']
    for field in fields:
        # Keep only the username/password/captcha requests; other providers
        # (e.g. OAuth) are ignored by this demo.
        if field['id'] in ('MediaWiki\\Auth\\UserDataAuthenticationRequest', \
            'CaptchaAuthenticationRequest', 'MediaWiki\\Auth\\PasswordAuthenticationRequest'):
            result[field['id']] = field['fields']
    return result


def create_account(form, captcha_id):
    """Send a post request along with create account token, user information
    and return URL to the API to create an account on a wiki.

    Flashes a success or failure message into the Flask session.
    """
    createtoken = fetch_create_token()
    response = S.post(url=API_ENDPOINT, data={
        'action': 'createaccount',
        'createtoken': createtoken,
        'username': form['username'],
        'password': form['password'],
        'retype': form['retype'],
        'email': form['email'],
        'createreturnurl': 'http://127.0.0.1:5000/',
        'captchaId': captcha_id,
        'captchaWord': form['captchaWord'],
        'format': 'json'
    })
    data = response.json()
    createaccount = data['createaccount']
    if createaccount['status'] == "PASS":
        flash('Success! An account with username ' + \
            form['username'] + ' has been created!')
    else:
        flash('Oops! Something went wrong -- ' + \
            createaccount['messagecode'] + "." + createaccount['message'])


def fetch_create_token():
    """Fetch create account token via `tokens` module."""
    response = S.get(url=API_ENDPOINT, params={
        'action': 'query',
        'meta': 'tokens',
        'type': 'createaccount',
        'format': 'json'
    })
    data = response.json()
    return data['query']['tokens']['createaccounttoken']


if __name__ == "__main__":
    APP.run()
3,914
1,155
import sys, os
import argparse
from util.MongoUtil import MongoUtil
from util.Generator import Generator


# Custom help messages
def help_msg(name=None):
    """Usage string passed to argparse."""
    return '''main.py [-h] [--length LENGTH] [--search SEARCHFIELD SEARCHTEXT]
    '''


def search_usage():
    """Example invocations for the --search flag."""
    return '''python main.py --search website example.com
python main.py --search username admin
    '''


if __name__ == '__main__':
    # Argument requirements
    parser = argparse.ArgumentParser(
        description='Creates new passwords and adds them to mongodb.',
        usage=help_msg())
    parser.add_argument('--length', '-l', action='store', default='15',
                        dest='length',
                        help='Password length to be generated. (default=15)')
    parser.add_argument('--search', '-s', nargs=2, action='store',
                        dest='search',
                        help='Used to search for existing password records.')
    parser.add_argument('--import', '-i', action='store', dest='location',
                        help='Import LastPass csv file.')
    args = parser.parse_args()

    mongoUtil = MongoUtil()
    gen = Generator()

    search = args.search
    importLocation = args.location

    # Bug fix: the original wrapped this whole body in a bare `except:` that
    # reported "Pass positive integer as arg" for ANY failure (missing file,
    # DB errors, ...). Only the int() conversion can raise that error, so
    # only it is guarded.
    try:
        pass_len = int(args.length)
    except ValueError:
        print("Pass positive integer as arg")
        raise SystemExit(1)

    # Import LastPass CSV data
    if importLocation is not None:
        # Bug fix: `assert` is stripped under -O; fail explicitly instead.
        if not os.path.exists(importLocation):
            print('Import file not found: {}'.format(importLocation))
            raise SystemExit(1)
        mongoUtil.importLastPass(importLocation)
        print('Imported CSV Successfully')
    # Checks if search argument was provided
    elif search is None or len(search) != 2:
        website = input("Enter website: ")
        username = input("Enter username/email: ")
        password = gen.generate_password(pass_len)
        record = {"website": website, "username": username, "password": password}
        # Save into database
        if mongoUtil.addRecord(record):
            print("Record added.")
        else:
            print("Record failed.")
    # Don't create a password; search the database instead.
    else:
        if search[0] not in mongoUtil.searchableFields:
            # Fixed: was an f-string with no placeholders.
            print('Searchable fields are [username or website]')
            raise SystemExit(1)
        record = mongoUtil.searchRecord(search[0], search[1])
        print(record)
2,396
623
from basicnetworkswitch import * from cisconetworkswitch import *
66
17
from django.conf.urls import url
from referralnote import views

app_name = 'referral_note'
#view_obj = views.ReferralNotes()

# Referral-note routes. All are scoped to a patient id (`p_id`); delete and
# edit additionally take the note number (`notenum`).
urlpatterns = [
    url(r'^(?P<p_id>[0-9]+)/delete_referralnote/(?P<notenum>[0-9]+)$', views.delete_refnote, name='delete_referralnote'),
    url(r'^(?P<p_id>[0-9]+)/edit_referralnote/(?P<notenum>[0-9]+)$', views.edit_referralnote, name='edit_referralnote'),
    url(r'^(?P<p_id>[0-9]+)/new_referralnote/$', views.new_referralnote, name='new_referralnote'),
]
498
204
# thanks to max9111, https://stackoverflow.com/questions/41651998/python-read-and-convert-raw-3d-image-file
import numpy as np
from functools import lru_cache


@lru_cache(maxsize=2)
def imread_raw(filename : str, width : int = 1, height : int = 1, depth : int = 1, dtype = np.uint16):
    """Loads a raw image file (3D) with given dimensions from disk.

    Parameters
    ----------
    filename : path to the raw file
    width, height, depth : image dimensions; their product must equal the
        number of dtype-sized items in the file or reshape() raises ValueError
    dtype : per-pixel numpy dtype (default uint16)

    Returns
    -------
    numpy array of shape (depth, height, width) containing pixels from the
    specified file.

    NOTE(review): lru_cache returns the SAME (mutable) array to every caller
    with identical arguments — mutating the result corrupts the cache.
    Callers should copy() before writing.
    """
    # Bug fix: the original closed the file only after reshape(); a reshape
    # failure (wrong dimensions) leaked the file handle. The context manager
    # closes it on every path.
    with open(filename, 'rb') as f:
        flat = np.fromfile(f, dtype=dtype)
    return flat.reshape(depth, height, width)
742
247
import random
from brain_games.constants import MINIMAL_RANDOM, MAXIMAL_RANDOM


def greeting():
    """Return the rule text shown to the player."""
    return 'Find the greatest common divisor of given numbers.'


def main_action():
    """Build one round: a two-number question string and its GCD answer."""
    num_a = random.randint(MINIMAL_RANDOM, MAXIMAL_RANDOM)
    num_b = random.randint(MINIMAL_RANDOM, MAXIMAL_RANDOM)
    question = '{} {}'.format(num_a, num_b)
    answer = str(gcd(num_a, num_b))
    return question, answer


def gcd(a, b):
    """Greatest common divisor by the Euclidean algorithm (iterative)."""
    # Repeat the "remainder of the division" step until it reaches zero.
    while a % b != 0:
        a, b = b, a % b
    return b
655
242
import os from ase.visualize import view from mpl_toolkits.mplot3d import Axes3D # noqa from scipy.optimize import curve_fit from tqdm import tqdm import matplotlib.pyplot as plt import numpy as np import seaborn as sns sns.set( style="ticks", rc={ "font.family": "Arial", "font.size": 40, "axes.linewidth": 2, "lines.linewidth": 5, }, font_scale=3.5, palette=sns.color_palette("Set2") ) c = ["#007fff", "#ff3616", "#138d75", "#7d3c98", "#fbea6a"] # Blue, Red, Green, Purple, Yellow import utilities from Helix import Helix import matplotlib matplotlib.use("Qt5Agg") def cart2pol(x, y): rho = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) return rho, phi def center_atoms(atoms, center): x = center[0] y = center[1] z = center[2] # Centering atoms around given atom for idx, atom in enumerate(atoms): atoms[idx].position[0] = atom.position[0] - x atoms[idx].position[1] = atom.position[1] - y atoms[idx].position[2] = atom.position[2] - z return atoms def print_jmol_str(line_values, center): file = "analyzed/diffp_2me_homo-1" print("*"*25) print(f"Writing to {file}") print("*"*25) curve_str = f"draw curve1 CURVE curve width 0.3" for value in line_values: x = value[0] + center[0] y = value[1] + center[1] z = value[2] + center[2] curve_str += f" {{ {x} {y} {z} }}" with open(f"{file}/jmol_export.spt", "a") as f: f.write(curve_str) print(curve_str) def remove_outlier(ordered): # Not elegant, possibly slow, but it works temp = [] for idx, value in enumerate(ordered[:, 2]): if idx < len(ordered[:, 2]) - 1: temp.append(abs(value - ordered[idx + 1, 2])) std = np.std(temp) mean = np.mean(temp) # It lies much further down the z-axis # than the rest of the points if not (mean - std) < temp[0] < (mean + std): return ordered[1:] # If no outliers is found, return the original array else: return ordered center_bottom_top = np.array([2, 9, 7]) handedness = None truncation = [None, None] file = "./8cum_me_homo_homo/homo.cube" ax = plt.axes(projection='3d') radius = 1.4 limits = 3 # Check that the 
analysis hasn't already been done names = file.split("/") folder = "/".join(names[-3:-1]) print(f"foldername: {folder}") if os.path.exists(folder): print(f"Found existing data files in {folder}") planes = np.load(folder + "/planes.npy", allow_pickle=True) atoms, _, _, center = np.load( folder + "/atom_info.npy", allow_pickle=True ) xyz_vec = np.load(folder + "/xyz_vec.npy", allow_pickle=True) else: atoms, all_info, xyz_vec = utilities.read_cube(file) # Sort the data after z-value all_info = all_info[all_info[:, 2].argsort()] # Center of the molecule is chosen to be Ru # center = atoms[3].position center = atoms[center_bottom_top[0]].position all_info[:, :3] = all_info[:, :3] - center atoms = center_atoms(atoms, center) planes = [] plane = [] prev_coord = all_info[0] for coordinate in tqdm(all_info, desc="Finding planes.."): if np.equal(coordinate[2], prev_coord[2]): # we're in the same plane so add the coordinate plane.append([coordinate[0], coordinate[1], coordinate[2], coordinate[3]]) else: plane = np.array(plane) # Drop coordinates with isovalues == 0.0 plane = plane[np.where(plane[:, 3] != 0.0)] if plane.size != 0: planes.append(plane) plane = [] prev_coord = coordinate planes = np.array(planes) mean_z = [] ordered = [] all_r = [] bottom_carbon = atoms[center_bottom_top[1]].position top_carbon = atoms[center_bottom_top[2]].position print('Cleaning values..') for idx, plane in enumerate(planes): if top_carbon[2] > plane[0, 2] > bottom_carbon[2]: if idx < len(planes) - 1: # Uncomment to find points with the most positive isovalue # Rare cases there might be the same maximum at two locations # That's I just take the first one with [0][0] maximum = np.amax(plane[:, 3]) max_index = np.where(plane[:, 3] == maximum)[0][0] next_plane = planes[idx + 1] next_maximum = np.amax(next_plane[:, 3]) next_index = np.where(next_plane[:, 3] == next_maximum)[0][0] # Uncomment to find points with the most negative isovalue # minimum = np.amin(plane[:, 3]) # min_index = 
np.where(plane[:, 3] == minimum) # next_plane = planes[idx + 1] # next_minimum = np.amin(next_plane[:, 3]) # next_index = np.where(next_plane[:, 3] == next_minimum) current_iso_idx = max_index next_iso_idx = next_index # Check if point is within certain radius of the helical axis if cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0] < radius: current_x = plane[current_iso_idx, 0].item() current_y = plane[current_iso_idx, 1].item() current_z = plane[current_iso_idx, 2].item() current_iso = plane[current_iso_idx, 3].item() next_x = next_plane[next_index, 0].item() next_y = next_plane[next_index, 1].item() next_z = next_plane[next_index, 2].item() next_iso = next_plane[next_iso_idx, 3].item() # Current point is beneath the next point if (current_x == next_x) & (current_y == next_y): delta_z = abs(next_z - current_z) # Are they direcly on top of each other? if round(delta_z, 4) <= 2*round(xyz_vec[2], 4): mean_z.append(current_z) # They are not directly on top of each other else: ax.scatter( plane[current_iso_idx, 0], plane[current_iso_idx, 1], plane[current_iso_idx, 2], # c='purple', c=c[0], ) # To be used as an estimate of # the radius when fitting the helix all_r.append( cart2pol(plane[current_iso_idx, 0], plane[current_iso_idx, 1])[0] ) mean_z.append(current_z) ordered.append( [current_x, current_y, np.mean(mean_z), current_iso] ) mean_z = [] # TODO: Maybe I'm skipping the last point? Does it even matter? # else: # prev_x = current_x # prev_y = current_y # prev_z = current_z # prev_iso = current_iso # current_x = plane[max_index, 0].item() # current_y = plane[max_index, 1].item() # current_z = plane[max_index, 2].item() # current_iso = plane[max_index, 3].item() # if cart2pol(current_x, current_y)[0] < radius: # all_r.append(cart2pol(plane[max_index, 0], plane[max_index, 1])[0]) # if (current_x == prev_x) & (current_y == prev_y): # delta_z = abs(prev_z - current_z) # # Are they directly on top of each other? 
# if round(delta_z, 4) <= 2*round(z_vec, 4): # mean_z.append(current_z) # ordered.append([current_x, # current_y, # np.mean(mean_z), # current_iso]) # # They are not directly on top of each other # else: # mean_z.append(current_z) # ordered.append([current_x, # current_y, # np.mean(mean_z), # current_iso]) # mean_z = [] ordered = np.array(ordered) mean_radius = np.mean(all_r) # Check if the first point is an outlier ordered = remove_outlier(ordered) # ordered, mean_radius = np.load("orbital_16_helix.npy", allow_pickle=True) # ax.plot([0, ordered[0, 0]], [0, ordered[0, 1]], [0, 0]) # Line that connects each data point # ax.plot( # ordered[truncation[0]:truncation[1], 0], # ordered[truncation[0]:truncation[1], 1], # ordered[truncation[0]:truncation[1], 2], # color='blue' # ) print('Fitting datapoints to helix..') helix = Helix( ordered[0:, :3], fitting_method='ampgo', radius=mean_radius, handedness=handedness, truncation=truncation, ) out = helix.fit_helix() fitted_values = helix.fitted_values # print_jmol_str(fitted_values, center) print('RMSD: {}'.format(helix.RMSD)) print(out) print('handedness: {}'.format(helix.handedness)) delta_z = helix.get_statistics() print('std: {}'.format(np.std(delta_z))) print('mean: {}'.format(np.mean(delta_z))) print(f'p-value: {helix.p_value}') ax.plot( fitted_values[:, 0], fitted_values[:, 1], fitted_values[:, 2], ) ax.plot((0, helix.a[0]), (0, helix.a[1]), (0, helix.a[2])) ax.plot((0, helix.v[0]), (0, helix.v[1]), (0, helix.v[2])) ax.plot((0, helix.w[0]), (0, helix.w[1]), (0, helix.w[2]), color='black') print('Plotting atoms..') for atom in atoms: if atom.symbol == 'C': ax.scatter( atom.position[0], atom.position[1], atom.position[2], c='black' ) if atom.symbol == 'Ru': ax.scatter( atom.position[0], atom.position[1], atom.position[2], c='turquoise' ) # if atom.symbol == 'P': # ax.scatter3D(atom.position[0], # atom.position[1], # atom.position[2], # c='orange') ax.set_xlim([-limits, limits]) ax.set_ylim([-limits, limits]) 
ax.set_xlabel('X axis') ax.set_ylabel('Y axis') ax.set_zlabel('Z axis') plt.show()
10,336
3,552
from django.conf import settings
from django import http
from django.template import RequestContext, loader


def server_error(request, template_name='500.html'):
    """
    500 error handler.

    Templates: `500.html`
    Context: MEDIA_URL
        Path of static media (e.g. "media.example.org")
    """
    t = loader.get_template(template_name)
    # Bug fix: RequestContext takes the HttpRequest as its first argument;
    # the original passed only the context dict, so the dict was treated as
    # the request and no extra context was supplied.
    return http.HttpResponseServerError(t.render(RequestContext(request, {
        'MEDIA_URL': settings.MEDIA_URL
    })))
477
149
# -*- coding: utf-8 -*- # Generated by Django 1.11.7 on 2017-12-13 13:48 from __future__ import unicode_literals from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('fleet_management', '0003_vehicledocument_document_type'), ('fleet_management', '0004_incidentdocument_upload'), ] operations = [ ]
372
142
import torch
from ..attack.base_attack import BaseAttacker


class Model_inversion(BaseAttacker):
    def __init__(self, target_model, input_shape):
        """implementation of model inversion attack
        reference https://dl.acm.org/doi/pdf/10.1145/2810103.2813677

        Args:
            target_model: model of the victim
            input_shape: input shapes of taregt model

        Attributes:
            target_model: model of the victim
            input_shape: input shapes of taregt model
        """
        super().__init__(target_model)
        self.input_shape = input_shape

    def attack(self, target_label, lam, num_itr, process_func=lambda x: x):
        """Execute the model inversion attack on the target model.

        Gradient-descends an input tensor to maximize the target class
        score (minimize 1 - score).

        Args:
            target_label (int): taregt label
            lam (float) : step size
            num_itr (int) : number of iteration
            process_func (function) : default is identity function

        Returns:
            x_numpy (np.array) : reconstructed input
            log ([float]) : loss value per iteration
        """
        log = []
        x = torch.zeros(self.input_shape, requires_grad=True)
        for _ in range(num_itr):
            c = process_func(1 - self.target_model(x)[:, [target_label]])
            c.backward()
            with torch.no_grad():
                x -= lam * x.grad
                # Bug fix: backward() ACCUMULATES into x.grad; without this
                # reset every step applied the running sum of all previous
                # gradients instead of the current gradient.
                x.grad.zero_()
            log.append(c.item())
        x_numpy = x.to('cpu').detach().numpy().copy()
        return x_numpy, log
1,481
440
"""
Basic service for testing the service_utils run_main
"""


def main(to_send, config):
    """Entry point invoked by run_main; arguments are accepted but unused."""
    print('Hello World Main...')


# Declarative description of this service's single outgoing connection.
_out_connection_1 = {
    'connection_type': 'requester',
    'required_arguments': {
        'this_is_a_test_arg': str,
    },
    'required_return_arguments': {
        'this_is_a_return_arg': str,
    },
}

connection_models = {
    'out': {
        'out_connection_1': _out_connection_1,
    }
}
448
132
from time import sleep
from random import randint

# Jokenpô (rock-paper-scissors) against the computer.
itens = ('Pedra', 'Papel', 'Tesoura')
print('Suas opções: ')
print("""[ 0 ] PEDRA
[ 1 ] PAPEL
[ 2 ] TESOURA""")
computador = randint(0, 2)
jogador = int(input('Qual é a sua jogada? '))
# Bug fix: validate BEFORE indexing `itens`; an out-of-range choice used to
# raise IndexError at `itens[jogador]` and never reached 'Opção inválida'.
if jogador not in (0, 1, 2):
    print('Opção inválida')
else:
    print('JO')
    sleep(1)
    print('KEN')
    sleep(1)
    print('PO!!!')
    print('-=' * 11)
    print('Computador jogou {}'.format(itens[computador]))
    print('Jogador jogou {}'.format(itens[jogador]))
    print('-=' * 11)
    # Player wins: papel>pedra, pedra>tesoura, tesoura>papel.
    if (computador == 0 and jogador == 1) or \
       (computador == 2 and jogador == 0) or \
       (computador == 1 and jogador == 2):
        print('JOGADOR VENCE')
    elif (jogador == 0 and computador == 1) or \
         (jogador == 2 and computador == 0) or \
         (jogador == 1 and computador == 2):
        # Typo fixed in user-facing message: was 'COPUTADOR VENCE'.
        print('COMPUTADOR VENCE')
    else:
        print('EMPATE')
788
314
import wtdb
import unittest


class TestWtdbFunctions(unittest.TestCase):
    """Unit tests for the module-level helpers in `wtdb`."""

    def test_n_swaps_zero(self):
        # Zero swaps produces no word pairings at all.
        self.assertEqual(
            frozenset(),
            wtdb.n_swaps('foo', 'bar', 0),
        )

    def test_n_swaps_single(self):
        # One swap: every single cut-point exchange between the two words.
        self.assertSequenceEqual(
            {
                frozenset({'bar', 'foo'}),
                frozenset({'boo', 'far'}),
                frozenset({'oo', 'fbar'}),
                frozenset({'bfoo', 'ar'}),
            },
            wtdb.n_swaps('foo', 'bar', 1),
        )

    def test_n_swaps_one_double(self):
        # Two swaps on equal-length words, including the classic
        # steam/train -> strain/team exchange.
        self.assertSequenceEqual(
            {
                frozenset({'strain', 'team'}),
                frozenset({'train', 'steam'}),
                frozenset({'srain', 'tteam'}),
                frozenset({'trsteam', 'ain'}),
                frozenset({'stain', 'tream'}),
                frozenset({'tsteam', 'rain'}),
                frozenset({'sttrain', 'eam'}),
                frozenset({'sain', 'trteam'}),
            },
            wtdb.n_swaps('steam', 'train', 2),
        )

    def test_order_pair(self):
        # presumably orders by length (longest first) — confirm in wtdb.
        self.assertSequenceEqual(
            ('national', 'rail'),
            wtdb.order_pair(('rail', 'national'))
        )

    def test_order_pair_same_length(self):
        # Equal lengths appear to fall back to lexicographic order.
        self.assertSequenceEqual(
            ('steam', 'train'),
            wtdb.order_pair(('train', 'steam'))
        )


class TestWordSet(unittest.TestCase):
    """Tests for the wtdb.WordSet container (swap search and validation)."""

    def test_find_swaps_none(self):
        # A word unrelated to the set yields no swap candidates.
        word_set = wtdb.WordSet()
        word_set.add('foo')
        word_set.add('bar')
        self.assertListEqual([], list(word_set.find_swaps('hello')))

    def test_find_swaps_single_letter(self):
        word_set = wtdb.WordSet()
        word_set.add('national')
        word_set.add('rail')
        word_set.add('rational')
        self.assertListEqual(
            [
                (('national', 'rail'), ('rational', 'nail')),
            ],
            sorted(word_set.find_swaps('nail')),
        )

    def test_find_swaps_double_letter(self):
        word_set = wtdb.WordSet()
        word_set.add('steam')
        word_set.add('train')
        word_set.add('team')
        self.assertListEqual(
            [
                (('steam', 'train'), ('strain', 'team')),
            ],
            sorted(word_set.find_swaps('strain')),
        )

    def test_validate_ok(self):
        # All given words present in the set -> valid.
        word_set = wtdb.WordSet()
        word_set.add('foo')
        word_set.add('bar')
        self.assertTrue(word_set.validate('foo', 'bar'))

    def test_validate_bad(self):
        # Any unknown word ('foobar') invalidates the whole group.
        word_set = wtdb.WordSet()
        word_set.add('foo')
        word_set.add('bar')
        self.assertFalse(word_set.validate('foo', 'bar', 'foobar'))


if __name__ == '__main__':
    import doctest
    # Run wtdb's doctests alongside this module's unit tests.
    doctest.testmod(wtdb)
    unittest.main()
2,708
944
# ENVISIoN # # Copyright (c) 2019 Jesper Ericsson # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND # ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # ############################################################################################## # TODO: add hdf5 validation import sys,os,inspect import inviwopy import numpy as np import h5py from .LinePlotNetworkHandler import LinePlotNetworkHandler class BandstructureNetworkHandler(LinePlotNetworkHandler): """ Handler class for charge visualization network. 
Sets up and manages the charge visualization """ def __init__(self, hdf5_path, inviwoApp): LinePlotNetworkHandler.__init__(self, inviwoApp) self.setup_bandstructure_network(hdf5_path) def get_ui_data(self): # Return data required to fill user interface return [ "bandstructure", LinePlotNetworkHandler.get_ui_data(self) ] # ------------------------------------------ # ------- Network building functions ------- def setup_bandstructure_network(self, hdf5_path, xpos=0, ypos=0): with h5py.File(hdf5_path,"r") as h5: # A bool that tells if the band structure should be normalized around the fermi energy. has_fermi_energy = "/FermiEnergy" in h5 # Start building the Inviwo network. h5source = self.add_h5source(hdf5_path, xpos, ypos) ypos += 75 path_selection = self.add_processor("org.inviwo.hdf5.PathSelection", "Select Bandstructure", xpos, ypos) self.network.addConnection(h5source.getOutport("outport"), path_selection.getInport("inport")) # if has_fermi_energy: # fermi_point = self.add_processor("org.inviwo.HDF5ToPoint", "Fermi energy", xpos + 175, ypos) # self.network.addConnection(h5source.getOutport("outport"), # fermi_point.getInport("hdf5HandleFlatMultiInport")) ypos += 75 all_children_processor = self.add_processor("org.inviwo.HDF5PathSelectionAllChildren", "Select all bands", xpos, ypos) self.network.addConnection(path_selection.getOutport("outport"), all_children_processor.getInport("hdf5HandleInport")) ypos += 75 HDF5_to_function = self.add_processor("org.inviwo.HDF5ToFunction", "Convert to function", xpos, ypos) self.network.addConnection(all_children_processor.getOutport("hdf5HandleVectorOutport"), HDF5_to_function.getInport("hdf5HandleFlatMultiInport")) ypos += 75 function_to_dataframe = self.get_processor("Function to dataframe") self.network.addConnection(HDF5_to_function.getOutport("functionVectorOutport"), function_to_dataframe.getInport("functionFlatMultiInport")) # if has_fermi_energy: # 
self.network.addConnection(fermi_point.getOutport("pointVectorOutport"), # self.get_processor("Line plot").getInport("pointInport")) if has_fermi_energy: self.set_title("Energy - Fermi energy [eV]") else: self.set_title("Energy [eV]") # energy_text_processor.font.fontSize.value = 20 # energy_text_processor.position.value = inviwopy.glm.vec2(0.31, 0.93) # energy_text_processor.color.value = inviwopy.glm.vec4(0,0,0,1) # Start modifying properties. path_selection.selection.value = '/Bandstructure/Bands' # HDF5_to_function.yPathSelectionProperty.value = '/Energy' # self.toggle_all_y(True) self.set_y_selection_type(2) # background_processor.bgColor1.value = inviwopy.glm.vec4(1) # background_processor.bgColor2.value = inviwopy.glm.vec4(1) # canvas_processor.inputSize.dimensions.value = inviwopy.glm.ivec2(900, 700) # if has_fermi_energy: # fermi_point.pathSelectionProperty.value = '/FermiEnergy'
5,423
1,705
#! /usr/bin/env python # coding:utf-8 from __future__ import division, print_function import math # sqlalchemy import sqlalchemy from sqlalchemy.ext.declarative import declarative_base from sqlalchemy import create_engine from sqlalchemy import Column, TEXT, REAL, INTEGER from sqlalchemy.orm import sessionmaker from smt.db.tables import Tables #from pprint import pprint # prepare classes for sqlalchemy class Phrase(declarative_base()): __tablename__ = "phrase" id = Column(INTEGER, primary_key=True) lang1p = Column(TEXT) lang2p = Column(TEXT) class TransPhraseProb(declarative_base()): __tablename__ = "phraseprob" id = Column(INTEGER, primary_key=True) lang1p = Column(TEXT) lang2p = Column(TEXT) p1_2 = Column(REAL) p2_1 = Column(REAL) def phrase_prob(lang1p, lang2p, transfrom=2, transto=1, db="sqlite:///:memory:", init_val=1.0e-10): """ """ engine = create_engine(db) Session = sessionmaker(bind=engine) session = Session() # search query = session.query(TransPhraseProb).filter_by(lang1p=lang1p, lang2p=lang2p) if transfrom == 2 and transto == 1: try: # Be Careful! 
The order of conditional prob is reversed # as transfrom and transto because of bayes rule return query.one().p2_1 except sqlalchemy.orm.exc.NoResultFound: return init_val elif transfrom == 1 and transto == 2: try: return query.one().p1_2 except sqlalchemy.orm.exc.NoResultFound: return init_val def available_phrases(inputs, transfrom=2, transto=1, db="sqlite:///:memory:"): """ >>> decode.available_phrases(u"He is a teacher.".split(), db_name="sqlite:///:db:")) set([((1, u'He'),), ((1, u'He'), (2, u'is')), ((2, u'is'),), ((2, u'is'), (3, u'a')), ((3, u'a'),), ((4, u'teacher.'),)]) """ engine = create_engine(db) # create session Session = sessionmaker(bind=engine) session = Session() available = set() for i, f in enumerate(inputs): f_rest = () for fr in inputs[i:]: f_rest += (fr,) rest_phrase = u" ".join(f_rest) if transfrom == 2 and transto == 1: query = session.query(Phrase).filter_by(lang2p=rest_phrase) elif transfrom == 1 and transto == 2: query = session.query(Phrase).filter_by(lang1p=rest_phrase) lst = list(query) if lst: available.add(tuple(enumerate(f_rest, i+1))) return available class HypothesisBase(object): def __init__(self, db, totalnumber, sentences, ngram, ngram_words, inputps_with_index, outputps, transfrom, transto, covered, remained, start, end, prev_start, prev_end, remain_phrases, prob, prob_with_cost, prev_hypo, cost_dict ): self._db = db self._totalnumber = totalnumber self._sentences = sentences self._ngram = ngram self._ngram_words = ngram_words self._inputps_with_index = inputps_with_index self._outputps = outputps self._transfrom = transfrom self._transto = transto self._covered = covered self._remained = remained self._start = start self._end = end self._prev_start = prev_start self._prev_end = prev_end self._remain_phrases = remain_phrases self._prob = prob self._prob_with_cost = prob_with_cost self._prev_hypo = prev_hypo self._cost_dict = cost_dict self._output_sentences = outputps @property def db(self): return self._db @property def 
totalnumber(self): return self._totalnumber @property def sentences(self): return self._sentences @property def ngram(self): return self._ngram @property def ngram_words(self): return self._ngram_words @property def inputps_with_index(self): return self._inputps_with_index @property def outputps(self): return self._outputps @property def transfrom(self): return self._transfrom @property def transto(self): return self._transto @property def covered(self): return self._covered @property def remained(self): return self._remained @property def start(self): return self._start @property def end(self): return self._end @property def prev_start(self): return self._prev_start @property def prev_end(self): return self._prev_end @property def remain_phrases(self): return self._remain_phrases @property def prob(self): return self._prob @property def prob_with_cost(self): return self._prob_with_cost @property def prev_hypo(self): return self._prev_hypo @property def cost_dict(self): return self._cost_dict @property def output_sentences(self): return self._output_sentences def __unicode__(self): d = [("db", self._db), ("sentences", self._sentences), ("inputps_with_index", self._inputps_with_index), ("outputps", self._outputps), ("ngram", self._ngram), ("ngram_words", self._ngram_words), ("transfrom", self._transfrom), ("transto", self._transto), ("covered", self._covered), ("remained", self._remained), ("start", self._start), ("end", self._end), ("prev_start", self._prev_start), ("prev_end", self._prev_end), ("remain_phrases", self._remain_phrases), ("prob", self._prob), ("prob_with_cost", self._prob_with_cost), #("cost_dict", self._cost_dict), #("prev_hypo", ""), ] return u"Hypothesis Object\n" +\ u"\n".join([u" " + k + u": " + unicode(v) for (k, v) in d]) def __str__(self): return unicode(self).encode('utf-8') def __hash__(self): return hash(unicode(self)) class Hypothesis(HypothesisBase): """ Realize like the following class >>> args = {"sentences": sentences, ... 
"inputps_with_index": phrase, ... "outputps": outputps, ... "covered": hyp0.covered.union(set(phrase)), ... "remained": hyp0.remained.difference(set(phrase)), ... "start": phrase[0][0], ... "end": phrase[-1][0], ... "prev_start": hyp0.start, ... "prev_end": hyp0.end, ... "remain_phrases": remain_phrases(phrase, ... hyp0.remain_phrases), ... "prev_hypo": hyp0 ... } >>> hyp1 = decode.HypothesisBase(**args) """ def __init__(self, prev_hypo, inputps_with_index, outputps, ): start = inputps_with_index[0][0] end = inputps_with_index[-1][0] prev_start = prev_hypo.start prev_end = prev_hypo.end args = {"db": prev_hypo.db, "totalnumber": prev_hypo.totalnumber, "prev_hypo": prev_hypo, "sentences": prev_hypo.sentences, "ngram": prev_hypo.ngram, # set later "ngram_words": prev_hypo.ngram_words, "inputps_with_index": inputps_with_index, "outputps": outputps, "transfrom": prev_hypo.transfrom, "transto": prev_hypo.transto, "covered": prev_hypo.covered.union(set(inputps_with_index)), "remained": prev_hypo.remained.difference( set(inputps_with_index)), "start": start, "end": end, "prev_start": prev_start, "prev_end": prev_end, "remain_phrases": self._calc_remain_phrases( inputps_with_index, prev_hypo.remain_phrases), "cost_dict": prev_hypo.cost_dict, # set later "prob": 0, "prob_with_cost": 0, } HypothesisBase.__init__(self, **args) # set ngram words self._ngram_words = self._set_ngram_words() # set the exact probability self._prob = self._cal_prob(start - prev_end) # set the exact probability with cost self._prob_with_cost = self._cal_prob_with_cost(start - prev_end) # set the output phrases self._output_sentences = prev_hypo.output_sentences + outputps def _set_ngram_words(self): lst = self._prev_hypo.ngram_words + list(self._outputps) o_len = len(self._outputps) return list(reversed(list(reversed(lst))[:o_len - 1 + self._ngram])) def _cal_phrase_prob(self): inputp = u" ".join(zip(*self._inputps_with_index)[1]) outputp = u" ".join(self._outputps) if self._transfrom == 2 and 
self._transto == 1: return phrase_prob(lang1p=outputp, lang2p=inputp, transfrom=self._transfrom, transto=self._transto, db=self._db, init_val=-100) elif self._transfrom == 1 and self._transto == 2: return phrase_prob(lang1p=inputp, lang2p=outputp, transfrom=self._transfrom, transto=self._transto, db=self._db, init_val=-100) else: raise Exception("specify transfrom and transto") def _cal_language_prob(self): nw = self.ngram_words triwords = zip(nw, nw[1:], nw[2:]) prob = 0 for first, second, third in triwords: prob += language_model(first, second, third, self._totalnumber, transto=self._transto, db=self._db) return prob def _cal_prob(self, dist): val = self._prev_hypo.prob +\ self._reordering_model(0.1, dist) +\ self._cal_phrase_prob() +\ self._cal_language_prob() return val def _sub_cal_prob_with_cost(self, s_len, cvd): insert_flag = False lst = [] sub_lst = [] for i in range(1, s_len+1): if i not in cvd: insert_flag = True else: insert_flag = False if sub_lst: lst.append(sub_lst) sub_lst = [] if insert_flag: sub_lst.append(i) else: if sub_lst: lst.append(sub_lst) return lst def _cal_prob_with_cost(self, dist): s_len = len(self._sentences) cvd = set(i for i, val in self._covered) lst = self._sub_cal_prob_with_cost(s_len, cvd) prob = self._cal_prob(dist) prob_with_cost = prob for item in lst: start = item[0] end = item[-1] cost = self._cost_dict[(start, end)] prob_with_cost += cost return prob_with_cost def _reordering_model(self, alpha, dist): return math.log(math.pow(alpha, math.fabs(dist))) def _calc_remain_phrases(self, phrase, phrases): """ >>> res = remain_phrases(((2, u'is'),), set([((1, u'he'),), ((2, u'is'),), ((3, u'a'),), ((2, u'is'), (3, u'a')), ((4, u'teacher'),)])) set([((1, u'he'),), ((3, u'a'),), ((4, u'teacher'),)]) >>> res = remain_phrases(((2, u'is'), (3, u'a')), set([((1, u'he'),), ((2, u'is'),), ((3, u'a'),), ((2, u'is'), (3, u'a')), ((4, u'teacher'),)])) set([((1, u'he'),), ((4, u'teacher'),)]) """ s = set() for ph in phrases: for p in phrase: 
if p in ph: break else: s.add(ph) return s def create_empty_hypothesis(sentences, cost_dict, ngram=3, transfrom=2, transto=1, db="sqlite:///:memory:"): phrases = available_phrases(sentences, db=db) hyp0 = HypothesisBase(sentences=sentences, db=db, totalnumber=_get_total_number(transto=transto, db=db), inputps_with_index=(), outputps=[], ngram=ngram, ngram_words=["</s>", "<s>"]*ngram, transfrom=transfrom, transto=transto, covered=set(), start=0, end=0, prev_start=0, prev_end=0, remained=set(enumerate(sentences, 1)), remain_phrases=phrases, prev_hypo=None, prob=0, cost_dict=cost_dict, prob_with_cost=0) #print(_get_total_number(transto=transto, db=db)) return hyp0 class Stack(set): def __init__(self, size=10, histogram_pruning=True, threshold_pruning=False): set.__init__(self) self._min_hyp = None self._max_hyp = None self._size = size self._histogram_pruning = histogram_pruning self._threshold_pruning = threshold_pruning def add_hyp(self, hyp): #prob = hyp.prob # for the first time if self == set([]): self._min_hyp = hyp self._max_hyp = hyp else: raise Exception("Don't use add_hyp for nonempty stack") #else: # if self._min_hyp.prob > prob: # self._min_hyp = hyp # if self._max_hyp.prob < prob: # self._max_hyp = hyp self.add(hyp) def _get_min_hyp(self): # set value which is more than 1 lst = list(self) mn = lst[0] for item in self: if item.prob_with_cost < mn.prob_with_cost: mn = item return mn def add_with_combine_prune(self, hyp): prob_with_cost = hyp.prob_with_cost if self == set([]): self._min_hyp = hyp self._max_hyp = hyp else: if self._min_hyp.prob_with_cost > prob_with_cost: self._min_hyp = hyp if self._max_hyp.prob_with_cost < prob_with_cost: self._max_hyp = hyp self.add(hyp) # combine for _hyp in self: if hyp.ngram_words[:-1] == _hyp.ngram_words[:-1] and \ hyp.end == hyp.end: if hyp.prob_with_cost > _hyp: self.remove(_hyp) self.add(hyp) break # histogram pruning if self._histogram_pruning: if len(self) > self._size: self.remove(self._min_hyp) self._min_hyp = 
self._get_min_hyp() # threshold pruning if self._threshold_pruning: alpha = 1.0e-5 if hyp.prob_with_cost < self._max_hyp + math.log(alpha): self.remove(hyp) def _get_total_number(transto=1, db="sqlite:///:memory:"): """ return v """ Trigram = Tables().get_trigram_table('lang{}trigram'.format(transto)) # create connection in SQLAlchemy engine = create_engine(db) # create session Session = sessionmaker(bind=engine) session = Session() # calculate total number query = session.query(Trigram) return len(list(query)) def language_model(first, second, third, totalnumber, transto=1, db="sqlalchemy:///:memory:"): class TrigramProb(declarative_base()): __tablename__ = 'lang{}trigramprob'.format(transto) id = Column(INTEGER, primary_key=True) first = Column(TEXT) second = Column(TEXT) third = Column(TEXT) prob = Column(REAL) class TrigramProbWithoutLast(declarative_base()): __tablename__ = 'lang{}trigramprob'.format(transto) id = Column(INTEGER, primary_key=True) first = Column(TEXT) second = Column(TEXT) prob = Column(REAL) # create session engine = create_engine(db) Session = sessionmaker(bind=engine) session = Session() try: # next line can raise error if the prob is not found query = session.query(TrigramProb).filter_by(first=first, second=second, third=third) item = query.one() return item.prob except sqlalchemy.orm.exc.NoResultFound: query = session.query(TrigramProbWithoutLast ).filter_by(first=first, second=second) # I have to modify the database item = query.first() if item: return item.prob else: return - math.log(totalnumber) class ArgumentNotSatisfied(Exception): pass def _future_cost_estimate(sentences, phrase_prob): ''' warning: pass the complete one_word_prob ''' s_len = len(sentences) cost = {} one_word_prob = {(st, ed): prob for (st, ed), prob in phrase_prob.items() if st == ed} if set(one_word_prob.keys()) != set((x, x) for x in range(1, s_len+1)): raise ArgumentNotSatisfied("phrase_prob doesn't satisfy the condition") # add one word prob for tpl, prob in 
one_word_prob.items(): index = tpl[0] cost[(index, index)] = prob for length in range(1, s_len+1): for start in range(1, s_len-length+1): end = start + length try: cost[(start, end)] = phrase_prob[(start, end)] except KeyError: cost[(start, end)] = -float('inf') for i in range(start, end): _val = cost[(start, i)] + cost[(i+1, end)] if _val > cost[(start, end)]: cost[(start, end)] = _val return cost def _create_estimate_dict(sentences, phrase_prob, init_val=-100): one_word_prob_dict_nums = set(x for x, y in phrase_prob.keys() if x == y) comp_dic = {} # complete the one_word_prob s_len = len(sentences) for i in range(1, s_len+1): if i not in one_word_prob_dict_nums: comp_dic[(i, i)] = init_val for key, val in phrase_prob.items(): comp_dic[key] = val return comp_dic def _get_total_number_for_fce(transto=1, db="sqlite:///:memory:"): """ return v """ # create connection in SQLAlchemy engine = create_engine(db) # create session Session = sessionmaker(bind=engine) session = Session() tablename = 'lang{}unigram'.format(transto) Unigram = Tables().get_unigram_table(tablename) # calculate total number query = session.query(Unigram) sm = 0 totalnumber = 0 for item in query: totalnumber += 1 sm += item.count return {'totalnumber': totalnumber, 'sm': sm} def _future_cost_langmodel(word, tn, transfrom=2, transto=1, alpha=0.00017, db="sqlite:///:memory:"): tablename = "lang{}unigramprob".format(transto) # create session engine = create_engine(db) Session = sessionmaker(bind=engine) session = Session() UnigramProb = Tables().get_unigramprob_table(tablename) query = session.query(UnigramProb).filter_by(first=word) try: item = query.one() return item.prob except sqlalchemy.orm.exc.NoResultFound: sm = tn['sm'] totalnumber = tn['totalnumber'] return math.log(alpha) - math.log(sm + alpha*totalnumber) def future_cost_estimate(sentences, transfrom=2, transto=1, init_val=-100.0, db="sqlite:///:memory:"): # create phrase_prob table engine = create_engine(db) # create session Session = 
sessionmaker(bind=engine) session = Session() phrases = available_phrases(sentences, db=db) tn = _get_total_number_for_fce(transto=transto, db=db) covered = {} for phrase in phrases: phrase_str = u" ".join(zip(*phrase)[1]) if transfrom == 2 and transto == 1: query = session.query(TransPhraseProb).filter_by( lang2p=phrase_str).order_by( sqlalchemy.desc(TransPhraseProb.p2_1)) elif transfrom == 1 and transto == 2: query = session.query(TransPhraseProb).filter_by( lang1p=phrase_str).order_by( sqlalchemy.desc(TransPhraseProb.p1_2)) lst = list(query) if lst: # extract the maximum val val = query.first() start = zip(*phrase)[0][0] end = zip(*phrase)[0][-1] pos = (start, end) if transfrom == 2 and transto == 1: fcl = _future_cost_langmodel(word=val.lang1p.split()[0], tn=tn, transfrom=transfrom, transto=transto, alpha=0.00017, db=db) print(val.lang1p.split()[0], fcl) covered[pos] = val.p2_1 + fcl if transfrom == 1 and transto == 2: covered[pos] = val.p1_2 # + language_model() # estimate future costs phrase_prob = _create_estimate_dict(sentences, covered) print(phrase_prob) return _future_cost_estimate(sentences, phrase_prob) def stack_decoder(sentence, transfrom=2, transto=1, stacksize=10, searchsize=10, lang1method=lambda x: x, lang2method=lambda x: x, db="sqlite:///:memory:", verbose=False): # create phrase_prob table engine = create_engine(db) # create session Session = sessionmaker(bind=engine) session = Session() if transfrom == 2 and transto == 1: sentences = lang2method(sentence).split() else: sentences = lang1method(sentence).split() # create stacks len_sentences = len(sentences) stacks = [Stack(size=stacksize, histogram_pruning=True, threshold_pruning=False, ) for i in range(len_sentences+1)] cost_dict = future_cost_estimate(sentences, transfrom=transfrom, transto=transto, db=db) #create the initial hypothesis hyp0 = create_empty_hypothesis(sentences=sentences, cost_dict=cost_dict, ngram=3, transfrom=2, transto=1, db=db) stacks[0].add_hyp(hyp0) # main loop for i, 
stack in enumerate(stacks): for hyp in stack: for phrase in hyp.remain_phrases: phrase_str = u" ".join(zip(*phrase)[1]) if transfrom == 2 and transto == 1: query = session.query(TransPhraseProb).filter_by( lang2p=phrase_str).order_by( sqlalchemy.desc(TransPhraseProb.p2_1))[:searchsize] elif transfrom == 1 and transto == 2: query = session.query(TransPhraseProb).filter_by( lang1p=phrase_str).order_by( sqlalchemy.desc(TransPhraseProb.p1_2))[:searchsize] query = list(query) for item in query: if transfrom == 2 and transto == 1: outputp = item.lang1p elif transfrom == 1 and transto == 2: outputp = item.lang2p #print(u"calculating\n {0} = {1}\n in stack {2}".format( # phrase, outputp, i)) if transfrom == 2 and transto == 1: outputps = lang1method(outputp).split() elif transfrom == 1 and transto == 2: outputps = lang2method(outputp).split() # place in stack # and recombine with existing hypothesis if possible new_hyp = Hypothesis(prev_hypo=hyp, inputps_with_index=phrase, outputps=outputps) if verbose: print(phrase, u' '.join(outputps)) print("loop: ", i, "len:", len(new_hyp.covered)) stacks[len(new_hyp.covered)].add_with_combine_prune( new_hyp) return stacks if __name__ == '__main__': #import doctest #doctest.testmod() pass
26,442
7,963
#!/usr/bin/env python3
# Copyright 2018 Lael D. Barlow
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Contains functions for generating histograms and bar charts with pylab.
"""
import sys
import os
import re
import pylab
import subprocess
import numpy as np


def generate_histogram(title, values, num_bins, output_filename):
    """Plot *values* as a single histogram with *num_bins* bins and save it.

    The output format (pdf, png, ...) is inferred by pylab from the
    extension of *output_filename*.
    """
    pylab.hist(values, bins=num_bins)
    pylab.title(title)
    pylab.xlabel("Value")
    pylab.ylabel("Number of values in bin")
    pylab.savefig(output_filename)
    pylab.close()


def generate_double_histogram(title, values1, label1, values2, label2,
                              num_bins, output_filename):
    """Plot two lists of values as side-by-side histograms sharing bins.

    A legend distinguishes *values1* (labelled *label1*) from *values2*
    (labelled *label2*); the figure is written to *output_filename*.
    """
    # NOTE: this mutates the global matplotlib style for the process.
    pylab.style.use('seaborn-deep')
    pylab.hist([values1, values2], bins=num_bins, label=[label1, label2])
    pylab.title(title)
    pylab.xlabel("Value")
    pylab.ylabel("Number of values in bin")
    pylab.legend(loc='upper right')
    pylab.savefig(output_filename)
    pylab.close()


def autolabel_bars(rects, ax):
    """Attach a text label above each bar in *rects*, displaying its height."""
    for rect in rects:
        bar_height = rect.get_height()
        ax.annotate('{}'.format(bar_height),
                    xy=(rect.get_x() + rect.get_width() / 2, bar_height),
                    xytext=(0, 3),  # 3 points vertical offset
                    textcoords="offset points",
                    ha='center', va='bottom')


def generate_bar_chart(title, categories, labels, num_hits, output_filename):
    """Render a grouped bar chart of sequence counts and save it.

    categories: legend entry per bar series (e.g. ['Prot', 'Nucl']).
    labels: x-axis group labels (e.g. ['Non-redundant', 'Final positive']).
    num_hits: one list of counts per row; each row must have one value per
        category. NOTE(review): the fixed +-width/2 offsets only separate
        two series per group -- confirm behaviour for more than two.
    output_filename: path the figure is written to.
    """
    pylab.style.use('seaborn-deep')

    assert len(labels) == len(num_hits)
    for counts in num_hits:
        assert len(counts) == len(categories)

    x = np.arange(len(labels))  # the label locations
    width = 0.35                # the width of the bars

    fig, ax = pylab.subplots()
    rects_list = []
    for series_idx, (category, counts) in enumerate(zip(categories, num_hits),
                                                    start=1):
        # Alternate series to the left/right of each group tick.
        offset = -width / 2 if series_idx % 2 else width / 2
        rects_list.append(ax.bar(x + offset, counts, width, label=category))

    # Number each individual bar with its height.
    for rects in rects_list:
        autolabel_bars(rects, ax)

    # Axis labels, title and custom x-axis tick labels.
    ax.set_ylabel('Number of sequences')
    ax.set_title(title)
    ax.set_xticks(x)
    ax.set_xticklabels(labels)
    ax.legend()
    fig.tight_layout()

    pylab.savefig(output_filename)
    pylab.close()


if __name__ == '__main__':
    # Smoke test: render an example grouped bar chart, open it, then delete it.
    title = 'test bar chart'
    categories = ['Prot', 'Nucl']
    labels = ['Non-redundant', 'Final positive']
    num_hits = [[35, 30],
                [12, 6]]
    output_filename = 'test_bar_chart.pdf'
    generate_bar_chart(title,
                       categories,
                       labels,
                       num_hits,
                       output_filename
                       )
    # Open output file (macOS 'open'), then delete it.
    subprocess.call(['open', output_filename])
    os.remove(output_filename)
6,711
2,138
import os
import os.path as osp
import logging
import argparse
import math

import yaml
from tabulate import tabulate
from torch.utils.data import Dataset
from tqdm import tqdm
from typing import Tuple, List
import torch
import torch.nn as nn
import torch.nn.functional as functional
import torch.distributed as dist

from fast_segmentation.core.utils import get_next_file_name, delete_directory_content
from fast_segmentation.model_components.architectures import model_factory
from fast_segmentation.model_components.data_cv2 import get_data_loader
from fast_segmentation.model_components.logger import setup_logger
from fast_segmentation.core.consts import IGNORE_LABEL, NUM_CLASSES, BAD_IOU
from fast_segmentation.visualization.visualize import save_labels_mask_with_legend


def parse_args():
    """
    Creates the parser for evaluation arguments.

    Returns:
        The parsed argument namespace (argparse.Namespace), with the
        weight path, data roots, logging path, DDP rank/port, model name
        and YAML config path.
    """
    parse = argparse.ArgumentParser()
    # -1 means "not launched under torch.distributed" (see __main__ below).
    parse.add_argument('--local_rank', dest='local_rank', type=int, default=-1)
    parse.add_argument('--weight-path', dest='weight_pth', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/models/8/best_model.pth')
    parse.add_argument('--im_root', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data')
    parse.add_argument('--val_im_anns', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/val.txt')
    parse.add_argument('--false_analysis_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/data/false_analysis')
    parse.add_argument('--log_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/logs/regular_logs')
    parse.add_argument('--port', dest='port', type=int, default=44553, )
    parse.add_argument('--model', dest='model', type=str, default='bisenetv2')
    parse.add_argument('--config_path', type=str,
                       default='/home/bina/PycharmProjects/fast-segmentation/configs/main_cfg.yaml')

    return parse.parse_args()


class MscEvalV0(object):
    """Multi-scale (optionally flipped) whole-image mIoU evaluator.

    For each batch, logits are computed at every scale in ``scales``,
    resized back to label resolution, softmaxed and summed; the argmax of
    the summed probabilities is scored against the labels via a confusion
    histogram.
    """

    def __init__(self, scales=(1.,), flip=False, ignore_label=IGNORE_LABEL):
        self.scales = scales
        self.flip = flip
        # Label value excluded from the confusion histogram.
        self.ignore_label = ignore_label

    def __call__(self, net: nn.Module, data_loader, num_classes):
        """Evaluate ``net`` over ``data_loader`` and return mIoU as a float."""
        # evaluate
        # Confusion histogram: rows = ground truth, cols = prediction.
        hist = torch.zeros(num_classes, num_classes).cuda().detach()
        # Only rank 0 shows a progress bar under distributed evaluation.
        if dist.is_initialized() and dist.get_rank() != 0:
            d_iter = enumerate(data_loader)
        else:
            d_iter = enumerate(tqdm(data_loader))
        for i, (imgs, labels) in d_iter:
            n, _, h, w = labels.shape
            labels = labels.squeeze(1).cuda()
            size = labels.size()[-2:]
            probs = torch.zeros((n, num_classes, h, w), dtype=torch.float32).cuda().detach()
            for scale in self.scales:
                s_h, s_w = int(scale * h), int(scale * w)
                im_sc = functional.interpolate(imgs, size=(s_h, s_w), mode='bilinear',
                                               align_corners=True)
                im_sc = im_sc.cuda()
                if self.flip:
                    im_sc = torch.flip(im_sc, dims=(3,))
                # Model is assumed to return a tuple; [0] is the main logits head.
                logits = net(im_sc)[0]
                if self.flip:
                    # Flip logits back so they align with the unflipped labels.
                    logits = torch.flip(logits, dims=(3,))
                logits = functional.interpolate(logits, size=size, mode='bilinear',
                                                align_corners=True)
                probs += torch.softmax(logits, dim=1)
            # calc histogram of the predictions in each class
            preds = torch.argmax(probs, dim=1)
            relevant_labels = labels != self.ignore_label
            hist += torch.bincount(labels[relevant_labels] * num_classes + preds[relevant_labels],
                                   minlength=num_classes ** 2).view(num_classes, num_classes)
        if dist.is_initialized():
            # Sum histograms across all ranks so every process gets global mIoU.
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        # diagonal is the intersection; row+col sums minus diagonal is the union
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag() + 1e-6)
        ious[ious != ious] = 0  # replace nan with zero
        miou = ious.mean()
        return miou.item()


class MscEvalCrop(object):
    """Sliding-crop multi-scale mIoU evaluator.

    Images are scaled, evaluated in overlapping crops of ``crop_size``
    stepped by ``crop_stride`` (a fraction of the crop size), and the crop
    probabilities are summed back into a full-resolution map. Batches whose
    per-batch mIoU falls below BAD_IOU are dumped to
    ``false_analysis_path`` for inspection.
    """

    def __init__(self, crop_size: Tuple[int, int], crop_stride: float, false_analysis_path: str,
                 flip: bool = True, scales: Tuple = (0.5, 0.75, 1, 1.25, 1.5, 1.75),
                 label_ignore: int = IGNORE_LABEL):
        self.scales = scales
        self.ignore_label = label_ignore
        self.flip = flip
        self.distributed = dist.is_initialized()
        # Accept a single int or an (h, w) pair.
        self.crop_size = crop_size if isinstance(crop_size, (list, tuple)) else (crop_size, crop_size)
        self.crop_stride = crop_stride
        self.false_analysis_path = false_analysis_path

    def pad_tensor(self, in_tensor: torch.Tensor):
        """Zero-pad ``in_tensor`` up to the crop size if it is smaller.

        Returns the (possibly padded) tensor and the [hst, hed, wst, wed]
        indices of the original content inside it.
        """
        n, c, h, w = in_tensor.size()
        crop_h, crop_w = self.crop_size
        if crop_h < h and crop_w < w:
            # Already larger than a crop -- no padding needed.
            return in_tensor, [0, h, 0, w]
        pad_h, pad_w = max(crop_h, h), max(crop_w, w)
        out_tensor = torch.zeros(n, c, pad_h, pad_w).cuda()
        out_tensor.requires_grad_(False)
        # Center the original image inside the padded canvas.
        margin_h, margin_w = pad_h - h, pad_w - w
        hst, hed = margin_h // 2, margin_h // 2 + h
        wst, wed = margin_w // 2, margin_w // 2 + w
        out_tensor[:, :, hst:hed, wst:wed] = in_tensor
        return out_tensor, [hst, hed, wst, wed]

    def eval_chip(self, net: nn.Module, crop: torch.Tensor):
        """Run the net on one crop (plus its horizontal flip when enabled)."""
        prob = net(crop)[0].softmax(dim=1)
        if self.flip:
            crop = torch.flip(crop, dims=(3,))
            prob += net(crop)[0].flip(dims=(3,)).softmax(dim=1)
        # NOTE(review): exponentiating softmax output is unusual -- this makes
        # the values exp(probabilities), not probabilities; confirm intended.
        prob = torch.exp(prob)
        return prob

    def crop_eval(self, net: nn.Module, im: torch.Tensor, n_classes: int):
        """Slide crops over ``im`` and sum the per-crop class maps."""
        crop_h, crop_w = self.crop_size
        stride_rate = self.crop_stride
        im, indices = self.pad_tensor(im)
        n, c, h, w = im.size()

        stride_h = math.ceil(crop_h * stride_rate)
        stride_w = math.ceil(crop_w * stride_rate)
        # Number of crop positions needed to cover each axis.
        n_h = math.ceil((h - crop_h) / stride_h) + 1
        n_w = math.ceil((w - crop_w) / stride_w) + 1

        prob = torch.zeros(n, n_classes, h, w).cuda()
        prob.requires_grad_(False)
        for i in range(n_h):
            for j in range(n_w):
                st_h, st_w = stride_h * i, stride_w * j
                end_h, end_w = min(h, st_h + crop_h), min(w, st_w + crop_w)
                # Clamp the last crop so it stays fully inside the image.
                st_h, st_w = end_h - crop_h, end_w - crop_w
                chip = im[:, :, st_h:end_h, st_w:end_w]
                prob[:, :, st_h:end_h, st_w:end_w] += self.eval_chip(net, chip)
        # Strip the padding added by pad_tensor.
        hst, hed, wst, wed = indices
        prob = prob[:, :, hst:hed, wst:wed]
        return prob

    def scale_crop_eval(self, net: nn.Module, im: torch.Tensor, scale: Tuple, n_classes: int):
        """Crop-evaluate ``im`` at ``scale`` and resize the result back."""
        n, c, h, w = im.size()
        new_hw = [int(h * scale), int(w * scale)]
        im = functional.interpolate(im, new_hw, mode='bilinear', align_corners=True)
        prob = self.crop_eval(net, im, n_classes)
        prob = functional.interpolate(prob, (h, w), mode='bilinear', align_corners=True)
        return prob

    @torch.no_grad()
    def __call__(self, net: nn.Module, dl: Dataset, n_classes: int):
        """Evaluate ``net`` over ``dl`` with crops+scales; return mIoU."""
        data_loader = dl if self.distributed and not dist.get_rank() == 0 else tqdm(dl)

        hist = torch.zeros(n_classes, n_classes).cuda().detach()
        hist.requires_grad_(False)
        for i, (images, labels) in enumerate(data_loader):
            images = images.cuda()
            labels = labels.squeeze(1).cuda()
            n, h, w = labels.shape
            probs = torch.zeros((n, n_classes, h, w)).cuda()
            probs.requires_grad_(False)
            for sc in self.scales:
                probs += self.scale_crop_eval(net, images, sc, n_classes)
            torch.cuda.empty_cache()
            preds = torch.argmax(probs, dim=1)

            keep = labels != self.ignore_label
            cur_hist = torch.zeros(n_classes, n_classes).cuda().detach()
            bin_count = torch.bincount(labels[keep] * n_classes + preds[keep],
                                       minlength=n_classes ** 2). \
                view(n_classes, n_classes)
            cur_hist += bin_count
            # NOTE(review): the per-batch mIoU below is computed from the
            # *accumulated* `hist` (zero on the first batch), not `cur_hist`
            # -- looks like a bug; confirm whether `cur_hist` was intended.
            cur_miou = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
            cur_miou[cur_miou != cur_miou] = 0  # replace nan with zero
            cur_miou = cur_miou.mean()
            if cur_miou < BAD_IOU:
                # Dump predictions/labels of poorly scoring batches for review.
                save_in_false_analysis(preds=preds, labels=labels, path=self.false_analysis_path)
            hist += bin_count
        if self.distributed:
            dist.all_reduce(hist, dist.ReduceOp.SUM)
        ious = hist.diag() / (hist.sum(dim=0) + hist.sum(dim=1) - hist.diag())
        ious[ious != ious] = 0  # replace nan with zero
        miou = ious.mean()
        return miou.item()


def save_in_false_analysis(preds: torch.Tensor, labels: torch.Tensor, path: str):
    """Write each prediction/label pair in the batch as legend-annotated
    mask images under ``path``.

    NOTE: clears the directory first, so only the latest bad batch is kept.
    """
    delete_directory_content(path)
    for i, (pred, label) in enumerate(zip(preds, labels)):
        pred = pred.detach().cpu().numpy()
        label = label.detach().cpu().numpy()
        label_path = get_next_file_name(root_dir=path, prefix='label', suffix='.jpg')
        pred_path = get_next_file_name(root_dir=path, prefix='pred', suffix='.jpg')
        save_labels_mask_with_legend(mask=pred, save_path=pred_path)
        save_labels_mask_with_legend(mask=label, save_path=label_path)


@torch.no_grad()
def eval_model(net: nn.Module, ims_per_gpu: int, crop_size: Tuple[int, int], im_root: str,
               im_anns: str, false_analysis_path: str) -> Tuple[List[str], List[float]]:
    """Run the four standard evaluation protocols on ``net``.

    Protocols: single scale, single-scale crop, multi-scale+flip, and
    multi-scale+flip crop.

    Returns:
        (heads, mious): protocol names and their corresponding mIoU scores,
        in matching order.
    """
    is_dist = dist.is_initialized()
    dl = get_data_loader(data_path=im_root, ann_path=im_anns, ims_per_gpu=ims_per_gpu,
                         crop_size=crop_size, mode='val', distributed=is_dist)
    net.eval()

    heads, mious = [], []
    logger = logging.getLogger()

    # 1) plain single-scale evaluation
    single_scale = MscEvalV0((1.,), False)
    miou = single_scale(net, dl, NUM_CLASSES)
    heads.append('single_scale')
    mious.append(miou)
    logger.info('single mIOU is: %s\n', miou)

    # 2) single-scale with sliding crops
    single_crop = MscEvalCrop(crop_size=crop_size, crop_stride=2. / 3, flip=False, scales=(1.,),
                              label_ignore=IGNORE_LABEL, false_analysis_path=false_analysis_path)
    miou = single_crop(net, dl, NUM_CLASSES)
    heads.append('single_scale_crop')
    mious.append(miou)
    logger.info('single scale crop mIOU is: %s\n', miou)

    # 3) multi-scale with horizontal flip
    ms_flip = MscEvalV0((0.5, 0.75, 1, 1.25, 1.5, 1.75), True)
    miou = ms_flip(net, dl, NUM_CLASSES)
    heads.append('ms_flip')
    mious.append(miou)
    logger.info('ms flip mIOU is: %s\n', miou)

    # 4) multi-scale + flip with sliding crops
    ms_flip_crop = MscEvalCrop(crop_size=crop_size, crop_stride=2. / 3, flip=True,
                               scales=(0.5, 0.75, 1.0, 1.25, 1.5, 1.75),
                               label_ignore=IGNORE_LABEL,
                               false_analysis_path=false_analysis_path)
    miou = ms_flip_crop(net, dl, NUM_CLASSES)
    heads.append('ms_flip_crop')
    mious.append(miou)
    logger.info('ms crop mIOU is: %s\n', miou)

    return heads, mious


def evaluate(ims_per_gpu: int, crop_size: Tuple[int, int], weight_pth: str, model_type: str,
             im_root: str, val_im_anns: str, false_analysis_path: str):
    """Restore the model from ``weight_pth``, run all evaluation protocols
    and log the resulting mIoU table.
    """
    logger = logging.getLogger()

    # model
    logger.info('setup and restore model')
    net = model_factory[model_type](NUM_CLASSES)
    net.load_state_dict(torch.load(weight_pth))
    net.cuda()

    is_dist = dist.is_initialized()
    if is_dist:
        local_rank = dist.get_rank()
        net = nn.parallel.DistributedDataParallel(net, device_ids=[local_rank, ],
                                                  output_device=local_rank)

    # evaluator
    heads, mious = eval_model(net=net, ims_per_gpu=ims_per_gpu, im_root=im_root,
                              im_anns=val_im_anns, false_analysis_path=false_analysis_path,
                              crop_size=crop_size)
    logger.info(tabulate([mious], headers=heads, tablefmt='orgtbl'))


if __name__ == "__main__":
    args = parse_args()

    with open(args.config_path) as f:
        cfg = yaml.load(f, Loader=yaml.FullLoader)

    # local_rank != -1 means we were launched by a distributed launcher.
    if not args.local_rank == -1:
        torch.cuda.set_device(args.local_rank)
        dist.init_process_group(backend='nccl',
                                init_method='tcp://127.0.0.1:{}'.format(args.port),
                                world_size=torch.cuda.device_count(),
                                rank=args.local_rank
                                )

    if not osp.exists(args.log_path):
        os.makedirs(args.log_path)
    setup_logger('{}-eval'.format(args.model), args.log_path)

    evaluate(ims_per_gpu=cfg['ims_per_gpu'], crop_size=cfg['crop_size'],
             weight_pth=args.weight_pth, model_type=args.model, im_root=args.im_root,
             val_im_anns=args.val_im_anns, false_analysis_path=args.false_analysis_path)
12,838
4,668
from django.conf.urls import url

from . import views

# URL namespace for this app; reverse with e.g. 'talent:musician-detail'.
app_name = 'talent'

urlpatterns = [
    # Landing page of the talent app.
    url(r'^$', views.IndexView.as_view(), name='index'),

    # Musician routes: list, detail, edit, create, delete (pk = primary key).
    url(r'^musicians/$', views.MusicianIndex.as_view(), name='musicians'),
    url(r'^musicians/(?P<pk>[0-9]+)/$', views.MusicianDetail.as_view(), name='musician-detail'),
    url(r'^musicians/(?P<pk>\d+)/edit/$', views.UpdateMusician.as_view(), name='musician-edit'),
    url(r'^musicians/create/$', views.MusicianCreate.as_view(), name='musician-create'),
    # NOTE(review): delete uses the shared DeleteTalent view -- presumably it
    # handles any talent type; confirm against the view implementation.
    url(r'^musicians/(?P<pk>\d+)/delete/$', views.DeleteTalent.as_view(), name='musician-delete'),

    # Artist routes: list, detail, edit, create (no delete route defined here).
    url(r'^artists/$', views.ArtistIndex.as_view(), name='artists'),
    url(r'^artists/(?P<pk>[0-9]+)/$', views.ArtistDetail.as_view(), name='artist-detail'),
    url(r'^artists/(?P<pk>\d+)/edit/$', views.UpdateArtist.as_view(), name='artist-edit'),
    url(r'^artists/create/$', views.ArtistCreate.as_view(), name='artist-create'),
]
940
360
import discord
from discord.ext.modules import ModularCommandClient

if __name__ == "__main__":
    # Create the modular command client with all gateway intents disabled
    # (Intents.none()), so no privileged event data is requested.
    client = ModularCommandClient(intents=discord.Intents.none())

    @client.event
    async def on_ready():
        # Called once the client has connected and is ready.
        print("Logged on as {0}!".format(client.user))

    # Load the command extensions before connecting.
    client.load_extension("commands.hello_module")
    client.load_extension("commands.advanced_module")

    # NOTE(review): "your_bot_token" is a placeholder -- supply the real token
    # (e.g. from an environment variable); never commit a live token.
    client.run("your_bot_token")
402
127
# Copyright 2019 Benjamin Santos
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# -*- coding: utf-8 -*-
"""
This module contains the classes functions and helpers to compute the
plasma: electron-energy-dependent reaction rates and a 7-component
Ar/SiH4 global (volume-averaged) plasma chemistry model.
"""
__author__ = "Benjamin Santos"
__copyright__ = "Copyright 2019"
__credits__ = ["Benjamin Santos"]
__license__ = "Apache 2.0"
__version__ = "0.0.1"
__maintainer__ = "Benjamin Santos"
__email__ = "caos21@gmail.com"
__status__ = "Beta"

from collections import namedtuple

import numpy as np
import scipy.constants as const

import trazar as tzr

# Physical constants (SI units).
PI = const.pi
KE = 1.0/(4.0*PI*const.epsilon_0)  # Coulomb constant
INVKE = 1.0/KE
KB = const.Boltzmann
QE = const.elementary_charge
ME = const.electron_mass

# Reactor/discharge description: geometry (length, radius), gas temperature,
# ion temperature, pressure in Torr, Ar:SiH4 mixing ratio, species masses,
# input power, and a 0/1 flag enabling the tunnelling source terms.
PlasmaSystem = namedtuple('System', ['length', 'radius', 'temperature',
                                     'ion_temperature', 'pressure_torr',
                                     'arsih4_ratio', 'armass', 'sih4mass',
                                     'power', 'with_tunnel'])


def constant_rate(energy, avar, bvar, cvar):
    """ Returns a constant rate a (bvar/cvar unused; signature kept uniform
    with the other rate functions so RateSpec can call any of them).
    """
    return avar*np.ones_like(energy)


def arrhenius_rate(energy, avar, bvar, cvar):
    """ Returns the Arrhenius rate a * energy**c * exp(-b/energy). """
    return avar * np.power(energy, cvar) * np.exp(-bvar/energy)


def a1expb_rate(energy, avar, bvar, cvar):
    """ Returns a1expb rate a * (1 - exp(-b*energy)) (cvar unused). """
    return avar * (1.0 - np.exp(-bvar*energy))


class RateSpec:
    """ Defines a rate: a rate function plus its (a, b, c) coefficients
    and a display name.
    """
    def __init__(self, rate_function=None, avar=0.0, bvar=0.0, cvar=0.0, name=""):
        self.rate_function = rate_function
        self.avar = avar
        self.bvar = bvar  # also read externally as an energy threshold (see PlasmaChem.system)
        self.cvar = cvar
        self.name = name

    def __call__(self, energy):
        """ Returns the rate at mean electron energy value """
        return self.rate_function(energy, self.avar, self.bvar, self.cvar)


class RatesMap:
    """ Returns a dict of rates (name -> RateSpec) built from a raw
    description dict of the form {name: (kind, a, b, c)}.
    """
    def __init__(self, rates_dict):
        """ rates_dict: {name: (kind, a, b, c)} with kind in
        {"a1expb", "arrhenius", "constant"}.
        """
        self.rates_dict = rates_dict
        self.rates_map = dict()

    def get_ratesmap(self):
        """ Get the rates map, instantiating one RateSpec per entry.
        Entries with an unrecognized kind are silently skipped.
        """
        for k, var in self.rates_dict.items():
            if var[0] == "a1expb":
                self.rates_map[k] = RateSpec(a1expb_rate, var[1], var[2], var[3], k)
            if var[0] == "arrhenius":
                self.rates_map[k] = RateSpec(arrhenius_rate, var[1], var[2], var[3], k)
            if var[0] == "constant":
                self.rates_map[k] = RateSpec(constant_rate, var[1], var[2], var[3], k)
        return self.rates_map

    def plot_rates(self, energy, savename="figx.eps"):
        """ Plot the rates versus energy on a log-y axis and save the figure.
        NOTE(review): the x-axis label says "Time (s)" but the abscissa is
        energy -- confirm the intended label.
        """
        rates, labels = [], []
        for k, var in self.rates_map.items():
            rates.append(var(energy))
            labels.append(var.name)
        tzr.plot_plain(energy, rates, title="Rates",
                       axislabel=["Time (s)",
                                  r"Rate coefficient (m$^{3}$s$^{-1}$)"],
                       logx=False, logy=True, labels=labels,
                       ylim=[1e-18, 1e-12], savename=savename)


class PlasmaChem():
    """ Plasma model: global Ar/SiH4 chemistry. Precomputes thermal
    velocities and wall fluxes from the PlasmaSystem, and exposes
    ``system`` as the right-hand side of the density/energy ODEs.
    """
    def __init__(self, rates_map, plasmasystem):
        self.rates_map = rates_map
        self.plasmasystem = plasmasystem
        # Mutable state updated externally (e.g. by a nanoparticle module).
        self.electron_density = 1.0
        self.nano_qdens = 0.0       # charge density carried by nanoparticles
        self.nano_qdens_rate = 0.0
        # Thermal energies (J).
        self.kbtg = KB * self.plasmasystem.temperature
        self.ion_kbtg = KB * self.plasmasystem.ion_temperature
        # Torr -> Pa conversion.
        self.pressure = 133.32237 * self.plasmasystem.pressure_torr

        # Cylindrical reactor volume and lateral area.
        self.reactor_volume = (self.plasmasystem.length*PI*self.plasmasystem.radius
                               *self.plasmasystem.radius)
        self.reactor_area = self.plasmasystem.length*2.0*PI*self.plasmasystem.radius
        # Surface-to-volume ratio used for wall-loss terms.
        self.ratio_av = self.reactor_area / self.reactor_volume

        # Ideal-gas total density, split into Ar and SiH4 by mixing ratio.
        self.gas_dens = self.pressure / self.kbtg
        self.nar = self.plasmasystem.arsih4_ratio * self.gas_dens
        self.nsih4 = (1.0-self.plasmasystem.arsih4_ratio) * self.gas_dens

        self.vth_ar = self.thermal_velocity(self.plasmasystem.armass)
        self.vth_sih4 = self.thermal_velocity(self.plasmasystem.sih4mass)

        # Neutral wall fluxes (SiH2 reuses the SiH4-mass flux).
        self.flux_sih3 = self.flux_neutrals(self.plasmasystem.sih4mass)
        self.flux_sih2 = self.flux_neutrals(self.plasmasystem.sih4mass)
        self.flux_ar = self.flux_neutrals(self.plasmasystem.armass)

        ## From Lieberman pag 80 (117)
        self.lambdai = 1. / (330 * self.plasmasystem.pressure_torr)

        self.flux_arp = self.flux_ions(self.plasmasystem.armass, self.lambdai)
        # NOTE(review): 2.9e-3 is a hard-coded mean free path for SiH3+ --
        # source/units to be confirmed.
        self.flux_sih3p = self.flux_ions(self.plasmasystem.sih4mass, 2.9e-3)

        ## peak voltage
        self.vsheath = 0.25*100.0

        # Per-species external source/drain coefficients, and density history
        # buffers (7 components: e, Ar+, Ar*, SiH3+, SiH3, SiH2, n*eps).
        self.density_sourcedrain = np.zeros(7)
        self.past_plasmadensity = np.ones(7)
        self.next_plasmadensity = np.zeros(7)

    def thermal_velocity(self, mass):
        """ computes the thermal velocity sqrt(2 kB Tg / m) """
        return np.sqrt(2.0*self.kbtg/mass)

    def diffusion_neutrals(self, mass, lambdax=3.5*1e-3):
        """ computes the diffusion coefficient for neutrals
        (lambdax: mean free path, default 3.5 mm) """
        return self.kbtg*lambdax/(mass*self.thermal_velocity(mass))

    def center2edge_neutrals(self, mass):
        """ center to edge ratio for neutrals """
        pfcn = (1.0 + (self.plasmasystem.length/2.0)
                * self.thermal_velocity(mass)
                / (4.0*self.diffusion_neutrals(mass)))
        return 1.0/pfcn

    def flux_neutrals(self, mass):
        """ computes the neutral flux (kinetic 1/4 n v_th with edge correction) """
        return 0.25 * self.center2edge_neutrals(mass) * self.thermal_velocity(mass)

    def bohm_velocity(self, mass):
        """ computes the Bohm velocity sqrt(kB Ti / m) """
        return np.sqrt(self.ion_kbtg/mass)

    def center2edge_ions(self, lambdax):
        """ center to edge ratio for ions """
        pfcn = np.sqrt(3.0+(0.5*self.plasmasystem.length/lambdax))
        return 1.0/pfcn

    def flux_ions(self, mass, lambdax):
        """ computes the ion flux """
        return self.center2edge_ions(lambdax) * self.bohm_velocity(mass)

    def ion_velocity(self, mass):
        """ computes the ion (mean thermal) velocity sqrt(8 kB Ti / (pi m)) """
        return np.sqrt(8.0*self.ion_kbtg/(PI*mass))

    def get_system(self):
        """ returns the system of equations (bound method for ODE solvers) """
        return self.system

    def system(self, time, nvector):
        """ system of equations for the densities.

        nvector components: [n_e, n_Ar+, n_Ar*, n_SiH3+, n_SiH3, n_SiH2,
        n_e*eps] where the last entry is the electron energy density.
        Returns the time derivatives in the same order.
        """
        nel = nvector[0]
        narp = nvector[1]
        narm = nvector[2]
        nsih3p = nvector[3]
        nsih3 = nvector[4]
        nsih2 = nvector[5]
        neps = nvector[6]

        # Mean electron energy.
        energy = neps/nel

        # Rate coefficients evaluated at the current mean energy.
        kel = self.rates_map["R1:kel"](energy)
        kio = self.rates_map["R2:ki"](energy)
        kex = self.rates_map["R3:kex"](energy)
        kiarm = self.rates_map["R4:kiarm"](energy)
        kelsih4 = self.rates_map["R5:kelsih4"](energy)
        kdisih4 = self.rates_map["R6:kdisih4"](energy)
        kdsih3 = self.rates_map["R7:kdsih3"](energy)
        kdsih2 = self.rates_map["R8:kdsih2"](energy)
        kisih3 = self.rates_map["R9:kisih3"](energy)
        kv13 = self.rates_map["R10:kv13"](energy)
        kv24 = self.rates_map["R11:kv24"](energy)
        k12 = self.rates_map["R12:k12"](energy)
        k13 = self.rates_map["R13:k13"](energy)
        k14 = self.rates_map["R14:k14"](energy)
        k15 = self.rates_map["R15:k15"](energy)

        # Energy thresholds: the b coefficient of each rate doubles as the
        # energy lost per reaction in the energy balance below.
        ekio = self.rates_map["R2:ki"].bvar
        ekex = self.rates_map["R3:kex"].bvar
        ekiarm = self.rates_map["R4:kiarm"].bvar
        ekdisih4 = self.rates_map["R6:kdisih4"].bvar
        ekdsih3 = self.rates_map["R7:kdsih3"].bvar
        ekdsih2 = self.rates_map["R8:kdsih2"].bvar
        ekisih3 = self.rates_map["R9:kisih3"].bvar
        ekv13 = self.rates_map["R10:kv13"].bvar
        ekv24 = self.rates_map["R11:kv24"].bvar

        # Local aliases for the precomputed quantities.
        nar = self.nar
        nsih4 = self.nsih4
        flux_arp = self.flux_arp
        flux_ar = self.flux_ar
        flux_sih3p = self.flux_sih3p
        flux_sih3 = self.flux_sih3
        flux_sih2 = self.flux_sih2
        ratio_av = self.ratio_av
        sourcedrain = self.density_sourcedrain
        with_tunnel = self.plasmasystem.with_tunnel

        # NOTE(review): nvector[3] is discarded here -- n_SiH3+ is replaced by
        # the quasi-neutrality closure n_e - n_Ar+ - nano charge. Presumably
        # intentional, but dnsih3p below still integrates the 4th component;
        # confirm consistency.
        nsih3p = nel - narp - self.nano_qdens

        # Electron balance: ionization sources minus wall losses and
        # external drains (+ optional tunnelling source).
        dnel = (+kio*nar*nel + kiarm*nel*narm + kdisih4*nel*nsih4
                + kisih3*nel*nsih3 - flux_arp*ratio_av*narp
                - flux_sih3p*ratio_av*nsih3p - sourcedrain[0]*nel
                + with_tunnel*sourcedrain[4])

        # Ar+ balance.
        dnarp = (+kio*nar*nel + kiarm*nel*narm - flux_arp*ratio_av*narp
                 - sourcedrain[1]*narp)

        # Ar* (metastable) balance.
        dnarm = (+ kex*nar*nel - kiarm*narm*nel - k12*narm*nsih4
                 - k13*narm*nsih4 - k14*narm*nsih3 - k15*narm*nsih2
                 - flux_ar*ratio_av*narm)

        # SiH3+ balance.
        dnsih3p = (+ kdisih4*nel*nsih4 + kisih3*nel*nsih3
                   - flux_sih3p*ratio_av*nsih3p)

        # SiH3 radical balance.
        dnsih3 = (+ kdsih3*nel*nsih4 - kisih3*nel*nsih3 + k12*narm*nsih4
                  - k14*narm*nsih3 - flux_sih3*ratio_av*nsih3)

        # SiH2 radical balance.
        dnsih2 = (+ kdsih2*nel*nsih4 + k13*narm*nsih4 + k14*narm*nsih3
                  - k15*narm*nsih2 - flux_sih2*ratio_av*nsih2)

        power = self.plasmasystem.power
        reactor_volume = self.reactor_volume
        vsheath = self.vsheath
        armass = self.plasmasystem.armass
        sih4mass = self.plasmasystem.sih4mass

        # Electron energy balance: ohmic input minus inelastic collision
        # losses, wall/sheath losses, elastic energy transfer, vibrational
        # excitation, and external source/drain terms.
        dneps = (power/reactor_volume
                 - ekio*kio*nar*nel
                 - ekex*kex*nar*nel
                 - ekiarm*kiarm*narm*nel
                 - (5./3.)*self.bohm_velocity(armass)*ratio_av*neps
                 - QE*vsheath*self.bohm_velocity(armass)*ratio_av*nel
                 - (5./3.)*self.bohm_velocity(sih4mass)*ratio_av*neps
                 - QE*vsheath*self.bohm_velocity(sih4mass)*ratio_av*nel
                 - 3.0*(ME/armass)*kel*neps*nar
                 - 3.0*(ME/sih4mass)*kelsih4*neps*nsih4
                 - ekisih3*kisih3*nel*nsih3
                 - ekdisih4*kdisih4*nel*nsih4
                 - ekdsih3*kdsih3*nel*nsih4
                 - ekdsih2*kdsih2*nel*nsih4
                 - ekv13*kv13*nel*nsih4
                 - ekv24*kv24*nel*nsih4
                 - sourcedrain[6]*nel + with_tunnel*sourcedrain[5])

        # Sanitize NaNs/infs in place so the ODE solver never sees them.
        return np.nan_to_num([dnel, dnarp, dnarm, dnsih3p, dnsih3, dnsih2, dneps],
                             copy=False)
11,282
4,235
from typing import Any, Dict, List, Type, TypeVar, Union

import attr

from ..models.secret import Secret
from ..types import UNSET, Unset

T = TypeVar("T", bound="CryptFsConfig")


@attr.s(auto_attribs=True)
class CryptFsConfig:
    """Crypt filesystem configuration details."""

    # Optional passphrase secret; the UNSET sentinel means "not provided".
    passphrase: Union[Unset, Secret] = UNSET
    # Extra keys from the API payload that are not modelled explicitly.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, merging in any additional properties."""
        serialized: Dict[str, Any] = dict(self.additional_properties)
        if not isinstance(self.passphrase, Unset):
            serialized["passphrase"] = self.passphrase.to_dict()
        return serialized

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Deserialize from a plain dict; unrecognized keys are preserved
        as additional properties."""
        data = src_dict.copy()
        raw_passphrase = data.pop("passphrase", UNSET)

        passphrase: Union[Unset, Secret]
        if isinstance(raw_passphrase, Unset):
            passphrase = UNSET
        else:
            passphrase = Secret.from_dict(raw_passphrase)

        instance = cls(passphrase=passphrase)
        instance.additional_properties = data
        return instance

    @property
    def additional_keys(self) -> List[str]:
        """Names of all unmodelled properties."""
        return [*self.additional_properties]

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
1,842
593
# -*- coding: utf-8 -*- from django.shortcuts import render, HttpResponse,redirect from django.http import JsonResponse from .forms import RestaurantesForm from .models import restaurants, addr#, image from django.contrib.auth.decorators import login_required import logging log = logging.getLogger(__name__) # Create your views here. def index(request): log.info("INDEX - Hey there it works!!") context = { 'menu': 'index' } #return HttpResponse('My Restaurants Manager') return render(request,'index.html',context) def test(request): valor = 3 context = { 'variable': valor, 'resta': restaurants.objects[:5], } # Aqui van la las variables para la plantilla return render(request,'test.html', context) @login_required def listar(request): log.info("LIST - Hey there it works!!") context = { 'resta': restaurants.objects[:10], 'menu': 'list' } # Aqui van la las variables para la plantilla return render(request,'listar.html', context) @login_required def buscar(request): log.info("SEARCH - Hey there it works!!") cocina = request.GET.get('cocina') lista=restaurants.objects(cuisine__icontains=cocina) context = { 'resta': lista, } return render(request,'listar.html', context) @login_required def add(request): log.info("ADD - Hey there it works!!") formu = RestaurantesForm() if request.method == "POST": formu = RestaurantesForm(request.POST, request.FILES) if formu.is_valid(): # valida o anhade errores # datos sueltos nombre = formu.cleaned_data['nombre'] cocina = formu.cleaned_data['cocina'] barrio = formu.cleaned_data['barrio'] calle = formu.cleaned_data['direccion'] imagen = request.FILES['imagen'] #formu.cleaned_data['imagen'] #tipo_foto = imagen.content_type # tipo y nombre direc = addr(street=calle) #i = image(extension=tipo_foto, img=imagen) r = restaurants(name=nombre, cuisine=cocina, borough=barrio, address=direc , image=imagen) r.save() # formu.save() # si está ligado al model return redirect(index) # GET o error context = { 'form': formu, 'menu': 'add', } return 
render(request, 'form.html', context) # @login_required # def update(request): # log.info("UPD - Hey there it works!!") # name = request.GET.get('name') # obj=restaurants.objects(name=name) # context = { # 'resta': obj, # } # return render(request,'formUpdate.html', context) # url @login_required def restaurant(request, name): log.info("DETAIL - Hey there it works!!") resta=restaurants.objects(name=name)[0] context = { 'resta': resta } return render(request, 'detalle.html', context) # recuperar foto @login_required def imagen(request, name): log.info("IMAGE - Hey there it works!!") res = restaurants.objects(name=name)[0] img = res.image.read() return HttpResponse(img, content_type="image/" + res.image.format) def r_ajax(request, name): log.info("AJAX - Hey there it works!!") resta = restaurants.objects(name=name)[0] maps = '<iframe width="450" height="300" frameborder="0" style="border:0" src="https://maps.google.com/maps?q='+str(name) + ' ' + str(resta.address.street) + ' ' + str(resta.borough)+'&amp;ie=UTF8&amp;&amp;output=embed" allowfullscreen></iframe>' return JsonResponse({'map':maps}) # podría ser string o HTML
3,598
1,167
import cb, time, struct, sys, random, string

# Banner printing: use Pythonista's `console` module for colours when
# available, otherwise fall back to a plain print.
# NOTE(review): banner line layout reconstructed from a flattened source —
# confirm against the original file.
try:
    import console
    console.set_color(0.0,0.2,1)
    print """
 _____       _     _____       _
|     |___ _| |___| __  | |_ _ ___
|   --| . | . | -_| __ -| | | -_|
|_____|___|___|___|_____|_|___|___|
  _____ _
 __/ ___/
 | | / / __ \
 | |/ / /_/ /
 |___/\____/
"""
    console.set_color()
    console.set_font()
except:
    print """
 _____       _     _____       _
|     |___ _| |___| __  | |_ _ ___
|   --| . | . | -_| __ -| | | -_|
|_____|___|___|___|_____|_|___|___|
  _____ _
 __/ ___/
 | | / / __ \
 | |/ / /_/ /
 |___/\____/
"""

# Try the real printbyte helper; otherwise provide a minimal stand-in that
# renders bytes as "\xNN" escape strings.
try:
    import printbyte
except:
    class printbyte (object):
        # NOTE(review): defined without `self` but invoked as
        # printbyte.byte_pbyte(...) on the class — on Python 2 calling an
        # unbound method with a non-instance argument raises TypeError;
        # confirm this fallback was ever exercised.
        def byte_pbyte(data):
            # Multi-byte payload: hex-escape each byte individually.
            if len(str(data)) > 1:
                msg = list(data)
                s = 0
                for u in msg:
                    u = str(u).encode("hex")
                    u = "\\x"+u
                    msg[s] = u
                    s = s + 1
                msg = "".join(msg)
            else:
                # Single byte: escape it directly.
                msg = data
                msg = str(msg).encode("hex")
                msg = "\\x"+msg
            return msg

# Global scan state.
shell = False          # optional payload to write to characteristics
verbose = True
devices = []           # names of peripherals already processed
responses = []         # characteristic uuids whose value was already shown
sim_names = False      # when True, device names are not deduplicated
blacklist = "none", "None", "unknown", "", "Unknon"


def ani_load(msg,amt=5,tm=0.1,rng=(1,3)):
    # Print a small "msg..." loading animation for a random number of cycles.
    for t in range(random.randint(rng[0],rng[1])):
        for _ in range(1,amt):
            sys.stdout.write("\r"+msg+"."*_+" ")
            time.sleep(tm)
    print


class BlueBorne (object):
    # Central-role delegate for the `cb` (CoreBluetooth) module: discovers
    # peripherals, connects, enumerates services/characteristics and dumps
    # characteristic values.

    def did_update_state(self):
        pass

    def did_discover_peripheral(self, p):
        # Connect to any named, non-blacklisted peripheral not seen before.
        if "" in p.uuid and str(p.name) not in blacklist and p.name not in devices:
            print "\n"+"="*36+"\n"
            if verbose:
                print "[+] Discovered " + str(p.name)
            self.peripheral = p
            cb.connect_peripheral(p)

    def did_disconnect_peripheral(self,p,error):
        try:
            print "[-] %s Disconnected" %(p.name)
            self.peripheral.cancel_peripheral_connection(p)
        except:
            pass

    def did_connect_peripheral(self, p):
        print "[+] Connected " + p.name
        p.discover_services()

    def did_discover_services(self, p, error):
        if not sim_names:
            devices.append(p.name)
        # NOTE(review): this binds a *local* `responses`, shadowing the
        # module-level list read by did_update_value — confirm intent.
        responses = []
        print
        for s in p.services:
            if "" in s.uuid:
                if verbose:
                    print "[+] Service " + s.uuid
                p.discover_characteristics(s)
        print

    def did_discover_characteristics(self, s, error):
        for c in s.characteristics:
            if "" in c.uuid:
                if verbose:
                    print "[+] Characteristic " + c.uuid
                if shell:
                    ani_load("[+] Generating Payload")
                if shell:
                    self.peripheral.write_characteristic_value(c,shell,True)
                    print "Payload -> %s//%s" %(self.peripheral.name[:6], c.uuid[:6])
                try:
                    self.peripheral.read_characteristic_value(c)
                    self.peripheral.set_notify_value(c, True)
                except Exception as e:
                    pass

    def did_update_value(self, c, error):
        # Report each characteristic's value at most once (tracked in the
        # module-level `responses` list).
        if c.uuid not in responses:
            print "[*] Checking Response For %s" %c.uuid[:6]
            try:
                if len(c.value) == 10 and "\x70" in c.value:
                    ten = False
                    resp = str(c.value)
                if str(c.uuid) == "2A24":
                    resp = str(c.value)
                elif len(c.value) == 1:
                    # Single byte: decode the "\xNN" escape back to an int.
                    resp = eval(printbyte.byte_pbyte(c.value).replace("\\x","0x"))
                elif ten:
                    # NOTE(review): `ten` is only bound in the branch above;
                    # reaching this line otherwise raises NameError (caught
                    # by the except below) — confirm intended behavior.
                    resp = printbyte.byte_pbyte(c.value)
                print "[%]",resp
                print
            except Exception as e:
                try:
                    print "[-] No Response"
                    print
                except:
                    pass
                pass
            responses.append(c.uuid)

    def did_write_value(self,c,error):
        try:
            print "[+] Payload Finished %s\n[=] Scanning Info On %s" %(c.uuid[:6],self.peripheral.name[:6])
        except:
            pass


# Kick off the scan and keep the script alive until Ctrl-C.
cb.set_central_delegate(BlueBorne())
ani_load("[*] Scanning For Devices",5,0.15,(6,8))
cb.scan_for_peripherals()
try:
    while True:
        time.sleep(0.1)
except KeyboardInterrupt:
    cb.reset()
    cb.stop_scan()
3,705
1,718
from django.urls import path

# Import the views explicitly instead of `from .views import *` (PEP 8):
# these four names are exactly the ones referenced below.
from .views import CreateProduct, ProductDetail, ProductList, ProductUpdate

app_name = 'products'

# URL routes for the products app; names are used for reverse() lookups.
urlpatterns = [
    path('create', CreateProduct.as_view(), name='create'),
    path('view/<int:pk>', ProductDetail.as_view(), name='detail'),
    path('list', ProductList.as_view(), name='list'),
    path('<int:pk>/update', ProductUpdate.as_view(), name='update'),
]
342
111
import functools
import os
import signal
import sys
from abc import ABC
from enum import Enum
from pathlib import Path
from typing import Callable, ClassVar, List, Optional

from jsonschema.exceptions import ValidationError as JsonSchemaValidationError
from requests.exceptions import RequestException
from yaml.error import MarkedYAMLError

from panoramic.cli.paths import Paths
from panoramic.cli.print import echo_error

# HTTP response header carrying the server-side request id; attached to
# errors so users can reference it in support requests.
DIESEL_REQUEST_ID_HEADER = 'x-diesel-request-id'


def _validation_messages(error: RequestException, fallback: str) -> List[str]:
    """Extract per-field validation messages from an API error response.

    Returns ``[fallback]`` when the response body is missing, is not JSON,
    or does not have the expected shape.
    """
    try:
        return [item['msg'] for item in error.response.json()['error']['extra_data']['validation_errors']]
    except Exception:
        return [fallback]


class CliBaseException(Exception):
    """Base class for CLI errors; can carry a request id for correlation."""

    request_id: Optional[str] = None

    def add_request_id(self, request_id: str):
        """Attach a request id and return self (fluent helper)."""
        self.request_id = request_id
        return self

    def extract_request_id(self, exc: RequestException):
        """Copy the request id header from a failed HTTP response, if any."""
        headers = getattr(exc.response, 'headers', {})
        return self.add_request_id(headers.get(DIESEL_REQUEST_ID_HEADER))

    def __str__(self) -> str:
        if self.request_id is not None:
            return f'{super().__str__()} (RequestId: {self.request_id})'
        return super().__str__()

    def __repr__(self) -> str:
        if self.request_id is not None:
            return f'{super().__repr__()} (RequestId: {self.request_id})'
        return super().__repr__()


class TimeoutException(CliBaseException):
    """Thrown when a remote operation times out."""


class IdentifierException(CliBaseException):
    """Error generating identifiers for a table."""

    def __init__(self, source_name: str, table_name: str):
        super().__init__(f'Identifiers could not be generated for table {table_name} in data connection {source_name}')


class JoinException(CliBaseException):
    """Error detecting joins in a dataset."""

    def __init__(self, dataset_name: str):
        super().__init__(f'Joins could not be detected for {dataset_name}')


class RefreshException(CliBaseException):
    """Error refreshing metadata."""

    def __init__(self, source_name: str, table_name: str):
        super().__init__(f'Metadata could not be refreshed for table {table_name} in data connection {source_name}')


class SourceNotFoundException(CliBaseException):
    """Thrown when a source cannot be found."""

    def __init__(self, source_name: str):
        super().__init__(f'Data connection {source_name} not found. Has it been connected?')


class DatasetNotFoundException(CliBaseException):
    """Thrown when a dataset cannot be found."""

    def __init__(self, dataset_name: str):
        super().__init__(f'Dataset {dataset_name} not found. Has it been created?')


class ScanException(CliBaseException):
    """Error scanning metadata."""

    def __init__(self, source_name: str, table_filter: Optional[str]):
        table_msg = f' {table_filter} ' if table_filter is not None else ' '
        # BUGFIX: message previously misspelled "connection" as "counnection".
        super().__init__(f'Metadata could not be scanned for table(s){table_msg}in data connection: {source_name}')


class InvalidModelException(CliBaseException):
    """Invalid model submitted to remote."""

    messages: List[str]

    def __init__(self, error: RequestException):
        self.messages = _validation_messages(error, 'Invalid model submitted')


class InvalidDatasetException(CliBaseException):
    """Invalid dataset submitted to remote."""

    messages: List[str]

    def __init__(self, error: RequestException):
        self.messages = _validation_messages(error, 'Invalid dataset submitted')


class InvalidFieldException(CliBaseException):
    """Invalid field submitted to remote."""

    messages: List[str]

    def __init__(self, error: RequestException):
        self.messages = _validation_messages(error, 'Invalid field submitted')


class DatasetWriteException(CliBaseException):
    """Error writing dataset to remote state."""

    def __init__(self, dataset_name: str):
        super().__init__(f'Error writing dataset {dataset_name}')


class ModelWriteException(CliBaseException):
    """Error writing model to remote state."""

    def __init__(self, dataset_name: str, model_name: str):
        super().__init__(f'Error writing model {model_name} in dataset {dataset_name}')


class FieldWriteException(CliBaseException):
    """Error writing field to remote state."""

    def __init__(self, dataset_name: Optional[str], field_name: str):
        message = f'Error writing field {field_name}'
        if dataset_name is not None:
            message += f' in dataset {dataset_name}'
        super().__init__(message)


class ValidationErrorSeverity(Enum):
    WARNING = 'WARNING'
    ERROR = 'ERROR'


class ValidationError(CliBaseException, ABC):
    """Abstract error raised during validation step."""

    # Subclasses downgrade to WARNING when the problem is non-fatal.
    severity: ClassVar[ValidationErrorSeverity] = ValidationErrorSeverity.ERROR


class FileMissingError(ValidationError):
    """File that should exist didn't."""

    def __init__(self, *, path: Path):
        if path == Paths.context_file():
            msg = f'Context file ({path.name}) not found in current working directory. Run pano init to create it.'
        else:
            # Should not happen => we only check above files exist explicitly
            msg = f'File Missing - {path}'
        super().__init__(msg)

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, FileMissingError):
            return False
        return str(self) == str(o)


class DuplicateModelNameError(ValidationError):
    """Two local models use the same model name."""

    def __init__(self, *, model_name: str, paths: List[Path]) -> None:
        try:
            paths = [path.relative_to(Path.cwd()) for path in paths]
        except ValueError:
            pass  # Use relative path when possible

        path_lines = ''.join(f'\n  in {path}' for path in paths)
        super().__init__(f'Multiple model files use model name {model_name}{path_lines}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, DuplicateModelNameError):
            return False
        return str(self) == str(o)


class DuplicateFieldSlugError(ValidationError):
    """Two local field files use the same slug."""

    def __init__(self, *, field_slug: str, paths: List[Path]) -> None:
        try:
            paths = [path.relative_to(Path.cwd()) for path in paths]
        except ValueError:
            pass  # Use relative path when possible

        path_lines = ''.join(f'\n  in {path}' for path in paths)
        super().__init__(f'Multiple field files use slug {field_slug}{path_lines}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, DuplicateFieldSlugError):
            return False
        return str(self) == str(o)


class InvalidYamlFile(ValidationError):
    """YAML syntax error."""

    def __init__(self, *, path: Path, error: MarkedYAMLError):
        try:
            path = path.relative_to(Path.cwd())
        except ValueError:
            pass  # Use relative path when possible

        super().__init__(f'Invalid YAML file - {error.problem}\n  on line {error.problem_mark.line}\n  in {path}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, InvalidYamlFile):
            return False
        return str(self) == str(o)


class DeprecatedAttributeWarning(ValidationError):
    """Warning emitted for attributes scheduled for removal."""

    severity = ValidationErrorSeverity.WARNING

    def __init__(self, *, attribute: str, path: Path):
        try:
            path = path.relative_to(Path.cwd())
        except ValueError:
            pass  # Use relative path when possible

        super().__init__(f'Deprecated attribute "{attribute}" \n  in {path}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, DeprecatedAttributeWarning):
            return False
        return str(self) == str(o)


class DeprecatedConfigProperty(ValidationError):
    """Warning emitted for config properties scheduled for removal."""

    severity = ValidationErrorSeverity.WARNING

    def __init__(self, property_: str, deprecation_message: Optional[str] = None):
        if deprecation_message is None:
            deprecation_message = "Property is deprecated"
        super().__init__(f"'{property_}': {deprecation_message}")

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, DeprecatedConfigProperty):
            return False
        return str(self) == str(o)


class JsonSchemaError(ValidationError):
    """Schema validation error for a local YAML/JSON file."""

    def __init__(self, *, path: Path, error: JsonSchemaValidationError):
        try:
            path = path.relative_to(Path.cwd())
        except ValueError:
            pass  # Use relative path when possible

        error_path = '.'.join(str(p) for p in error.path)
        super().__init__(f'{error.message}\n  for path {error_path}\n  in {path}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, JsonSchemaError):
            return False
        return str(self) == str(o)


class OrphanFieldFileError(ValidationError):
    """Field file exists but no model references it (warning only)."""

    severity = ValidationErrorSeverity.WARNING

    field_slug: str
    dataset_slug: str

    def __init__(
        self,
        *,
        field_slug: str,
        dataset_slug: str,
    ) -> None:
        self.field_slug = field_slug
        self.dataset_slug = dataset_slug
        super().__init__(f'Field {field_slug} under dataset {dataset_slug} not used by any model')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, OrphanFieldFileError):
            return False
        return str(self) == str(o)


class MissingFieldFileError(ValidationError):
    """Model references a field slug for which no field file exists."""

    field_slug: str
    dataset_slug: str
    data_reference: str
    identifier: bool
    model_name: str

    def __init__(
        self,
        *,
        field_slug: str,
        dataset_slug: str,
        data_reference: str,
        identifier: bool,
        model_name: str,
    ) -> None:
        self.field_slug = field_slug
        self.dataset_slug = dataset_slug
        self.data_reference = data_reference
        self.identifier = identifier
        self.model_name = model_name
        super().__init__(f'Missing field file for slug {field_slug} under dataset {dataset_slug}')

    def __eq__(self, o: object) -> bool:
        if not isinstance(o, MissingFieldFileError):
            return False
        return str(self) == str(o)


def handle_exception(f: Callable):
    """Print exception and exit with error code."""

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except Exception:
            echo_error('Internal error occurred', exc_info=True)
            sys.exit(1)

    return wrapped


def handle_interrupt(f: Callable):
    """Exit app on keyboard interrupt."""

    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        try:
            return f(*args, **kwargs)
        except KeyboardInterrupt:
            # os._exit skips cleanup handlers; 128 + SIGINT is the
            # conventional exit code for death-by-interrupt.
            os._exit(128 + signal.SIGINT)

    return wrapped


class ConnectionNotFound(CliBaseException):
    """Connection not found in config."""

    def __init__(self):
        # BUGFIX: message previously read "Connection was set up." which
        # contradicted both the docstring and the suggested remedy.
        super().__init__('Connection was not set up. Run: pano connection setup -h')


class ConnectionUrlNotAvailableFound(CliBaseException):
    """Connection found but has no url configured."""

    def __init__(self):
        super().__init__('Connection has no url stored. Please call: pano connection setup --url <url>')


class ConnectionCreateException(CliBaseException):
    """Failed to create connection due to error."""

    def __init__(self, error_message: str):
        super().__init__(f'Failed to create connection: {error_message}.')


class ConnectionUpdateException(CliBaseException):
    """Failed to update connection due to error."""

    def __init__(self, error_message: str):
        super().__init__(f'Failed to update connection: {error_message}.')


class ConnectionFormatException(CliBaseException):
    """Credentials supplied for a connection are malformed."""

    def __init__(self, credential_error: str):
        super().__init__(f'Invalid credentials format FAIL: {credential_error}')


class TransformCompileException(CliBaseException):
    """Failed to compile a Transform due to an error."""

    def __init__(self, transform_name: str):
        super().__init__(f'Error compiling transform {transform_name}')


class TransformExecutionFailed(Exception):
    """Failed to execute a transform on the remote connection."""

    compiled_sql: str

    def __init__(self, transform_name: str, compiled_sql: str):
        self.compiled_sql = compiled_sql
        super().__init__(f'Error executing transform {transform_name}')


class ExecuteInvalidArgumentsException(CliBaseException):
    """Failed to compile a execute due to invalid arguments."""

    def __init__(self, message: str):
        super().__init__(message)
13,004
3,730
""" Builds and runs application """ from app import app, user_datastore, db from api_module.api_routes import api from auth_module.auth_routes import auth app.register_blueprint(api) app.register_blueprint(auth) if __name__ == "__main__": # database = create_db(connection_str) # attach_db(g, database) app.run(port=3000)
336
114
''' flask_miracle ------------- This module provides a fabric layer between the Flask framework and the Miracle ACL library. :copyright: (c) 2017 by Timo Puschkasch. :license: BSD, see LICENSE for more details. ''' from .base import Acl from .functions import check_all, check_any, set_current_roles from .decorators import macl_check_any, macl_check_all
381
126
from flask import Blueprint

bp = Blueprint('tags', __name__)


@bp.record_once
def register(state):
    """Run exactly once, when this blueprint is first registered on an app.

    The import below is for its side effect only: it loads the tag model
    definitions at registration time (presumably so their metadata/tables
    are declared before the app starts — confirm against sopy.tags.models).
    """
    from sopy.tags import models
134
44
# -*- coding: utf-8 -*- """{{ cookiecutter.project_slug }} rest-api handlers.""" from .security import security_router __all__ = ("security_router",)
152
53
from datetime import datetime

from .mixins import ArtistMixin, ExternalIDMixin, ExternalURLMixin, ImageMixin, TrackMixin
from .object import SpotifyObject
from .track import SimpleTrack


class _BaseAlbum(SpotifyObject, TrackMixin, ImageMixin, ExternalURLMixin, ArtistMixin):
    """Shared construction logic for simple and full album objects."""

    _type = 'album'
    _track_class = SimpleTrack
    # strptime format keyed by the release_date_precision value.
    __date_fmt = dict(year='%Y', month='%Y-%m', day='%Y-%m-%d')

    def __init__(self, client, data):
        super().__init__(client, data)
        # Each mixin pops its own keys out of the shared `data` dict.
        TrackMixin.__init__(self, data)
        ImageMixin.__init__(self, data)
        ExternalURLMixin.__init__(self, data)
        ArtistMixin.__init__(self, data)

        # can be None, though this is not specified in the API docs
        self.album_group = data.pop('album_group', None)
        self.album_type = data.pop('album_type')
        self.available_markets = data.pop('available_markets', None)
        self.release_date_precision = data.pop('release_date_precision')

        if self.release_date_precision is None:
            self.release_date = None
        else:
            try:
                # Parse with the precision-appropriate format; an unparsable
                # value yields None instead of raising.
                self.release_date = datetime.strptime(
                    data.pop('release_date'),
                    self.__date_fmt[self.release_date_precision]
                )
            except ValueError:
                self.release_date = None


class SimpleAlbum(_BaseAlbum):
    '''
    Represents an Album object.

    .. note::
        To iterate all tracks, you have to use the ``async for`` construct or
        fill the object with ``.fill()`` before iterating ``.tracks``.

    id: str
        Spotify ID of the album.
    name: str
        Name of the album.
    tracks: List[:class:`Track`]
        List of tracks on the album.
    artists: List[:class:`Artist`]
        List of artists that appear on the album.
    images: List[:class:`Image`]
        List of associated images, such as album cover in different sizes.
    track_count: int
        The expected track count as advertised by the last paging object.
        ``is_filled()`` can return True even if fewer tracks than this exists
        in ``tracks``, since some fetched tracks from the API can be None for
        various reasons.
    uri: str
        Spotify URI of the album.
    link: str
        Spotify URL of the album.
    type: str
        Plaintext string of object type: ``album``.
    album_type:
        Type of album, e.g. ``album``, ``single`` or ``compilation``.
    available_markets: List[str] or None
        Markets where the album is available: ISO-3166-1_.
    external_urls: dict
        Dictionary that maps type to url.
    release_date: `datetime <https://docs.python.org/3/library/datetime.html#module-datetime>`_
        Date (and maybe time) of album release.
    release_date_precision: str
        Precision of ``release_date``. Can be ``year``, ``month``, or ``day``.
    album_group: str or None
        Type of album, e.g. ``album``, ``single``, ``compilation`` or
        ``appears_on``.
    '''


class FullAlbum(_BaseAlbum, ExternalIDMixin):
    '''
    Represents a complete Album object.

    This type has some additional attributes not existent in
    :class:`SimpleAlbum`.

    genres: List[str]
        List of genres associated with the album.
    label: str
        The label for the album.
    popularity: int
        An indicator of the popularity of the album, 0 being least popular
        and 100 being the most.
    copyrights: dict
        List of copyright objects.
    external_ids: dict
        Dictionary of external IDs.
    '''

    def __init__(self, client, data):
        super().__init__(client, data)
        ExternalIDMixin.__init__(self, data)

        # Full-album-only fields; pop() without default, so these keys are
        # required in the payload.
        self.genres = data.pop('genres')
        self.label = data.pop('label')
        self.popularity = data.pop('popularity')
        self.copyrights = data.pop('copyrights')
3,374
1,185
__version__ = "5.1.3" LOGGER_NAME = "connector.virustotal_intelligence"
73
31
from training.config_interface.BaseTrainingProcess import BaseTrainingProcess from training.config_interface.BaseTrainingEpoch import BaseTrainingEpoch
152
39
# -*- coding: utf-8 -*- # Copyright (c) 2017, masonarmani38@gmail.com and contributors # For license information, please see license.txt from __future__ import unicode_literals import frappe from frappe.model.document import Document class StationariesLog(Document): def on_submit(self): for item in self.items_issued: _create_bin_card(item, self) def _create_bin_card(item, doc): import datetime last_value = _last_bin_card_value(item) if last_value[0] < 0: frappe.throw("No more ") new_bin_card = frappe.new_doc("Stationaries Bin Card") new_bin_card.date = datetime.datetime.today() new_bin_card.item = item.item_issued new_bin_card.value = item.pqty new_bin_card.current_value = last_value[0] + item.pqty new_bin_card.last_value = last_value[2] new_bin_card.reference_doctype = doc.doctype new_bin_card.reference_docname = doc.name new_bin_card.ppu = last_value[1] new_bin_card.count = last_value[0] + item.pqty less = new_bin_card.count if less >= new_bin_card.ppu: # count and current has to change unit = int(less / new_bin_card.ppu) if (less / new_bin_card.ppu) > 1: new_count = less % new_bin_card.ppu else: new_count = less - new_bin_card.ppu # set new values new_bin_card.count = new_count new_bin_card.current_value = new_count # set item values item.qty = unit item.ppu = new_bin_card.ppu # remove value from stock _remove_unit(item) new_bin_card.submit() def _remove_unit(item): wh = "Stationaries - GCL" se = frappe.new_doc("Stock Entry") se.purpose = "Material Issue" se.title = "Material Issue" se.from_warehouse = wh # using the latest cost center for item last_cost_center = frappe.get_list(doctype="Stock Entry Detail", filters={"item_code": item.item_issued}, fields=['cost_center'], order_by='creation') d_cost_center = "" if last_cost_center[0].get('cost_center') != None: d_cost_center = last_cost_center[0].cost_center it = frappe.get_list(doctype="Item", filters={"name": item.item_issued}, fields=['stock_uom, item_name']) # set new item item = dict( f_warehouse=wh, 
t_warehouse="", qty=item.qty, item_code=item.item_issued, item_name=it[0].item_name, uom=it[0].stock_uom, cost_center=d_cost_center ) se.append('items', item) se.submit() def _last_bin_card_value(item): last_value = frappe.db.sql("SELECT `count`, ppu, current_value FROM `tabStationaries Bin Card` where item = '{item}' " "ORDER BY date DESC LIMIT 1".format(item=item.item_issued)) if len(last_value): return last_value[0] return [0, item.ppu, 0]
2,948
1,019
import time


def main(request, response):
    """Serve a tiny JS module after an artificial delay.

    The ``ms`` query parameter selects the delay in milliseconds
    (default 500); used to exercise slow module-script loading.
    """
    delay_ms = float(request.GET.first("ms", 500))
    time.sleep(delay_ms / 1E3)
    headers = [("Content-type", "text/javascript")]
    body = "export let delayedLoaded = true;"
    return headers, body
205
69
import struct
import os
import sys
import subprocess

# Compile a GLSL shader to SPIR-V with glslangValidator, pack it with
# yariv_pack, and dump the packed .yariv bytes as a comma-separated hex
# array (*.hex) suitable for embedding in C source.

if len(sys.argv) != 2:
    print('Usage: python %s filename \n output is *.spv *.yariv and *.hex file \n' % sys.argv[0])
    quit()

inputfilepath = sys.argv[1]
outputname = os.path.basename(inputfilepath)
outdir = os.path.dirname(inputfilepath)
ginfile = os.path.basename(inputfilepath)
ooutdir = os.path.join(outdir, "bin")

spirvcompiler = 'glslangValidator'
yariv_pack = './yariv_pack'
if os.name == 'nt':
    spirvcompiler += ".exe"
    # BUGFIX: the original appended ".exe" to spirvcompiler a second time
    # and never to yariv_pack, so the packer executable could not be found
    # on Windows.
    yariv_pack += ".exe"

if not os.path.isdir(ooutdir):
    os.mkdir(ooutdir, 0o0755)

spvpath = os.path.join(ooutdir, ginfile) + '.spv'
subprocess.call([spirvcompiler, '-V100', inputfilepath, '-o', spvpath])
subprocess.call([yariv_pack, spvpath])

outfilepath = os.path.join(ooutdir, outputname + '.hex')
# with-blocks guarantee both handles are flushed and closed (the original
# leaked both file objects, risking a truncated .hex on interpreter exit).
with open(os.path.join(ooutdir, ginfile) + '.yariv', 'rb') as infile, \
        open(outfilepath, 'w') as outfile:
    lineno = 1
    while 1:
        b = infile.read(1)
        if len(b) == 0:
            break
        d, = struct.unpack('B', b)
        outfile.write(hex(d) + ',')
        # Wrap the output every 20 byte literals.
        if lineno % 20 == 0:
            outfile.write('\n')
        lineno = lineno + 1
1,119
467
#!/usr/bin/env python3
"""Command-line driver for model estimation and budgetary-consistency analysis."""
import sys
import logging
import argparse
import tqdm

import dataset.budgetary
from model import *
from test import MockWorker
from dataset import load_raw_csv
from gui.estimation import Options as EstimationOpts
from dataset.experimental_data import ExperimentalData

logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)


class ProgressWorker:
    """Adapter that renders set_work_size/set_progress callbacks as a tqdm bar."""

    def __init__(self):
        self.bar = None         # tqdm bar, created lazily in set_work_size()
        self.size = None        # total number of work units
        self.last_value = 0     # last absolute progress value reported

    def set_work_size(self, size : int) -> None:
        self.size = size
        self.bar = tqdm.tqdm(total=size)

    def set_progress(self, value : int) -> None:
        # Robustness: tolerate a progress callback arriving before
        # set_work_size() instead of crashing on self.bar being None.
        if self.bar is None:
            self.bar = tqdm.tqdm(total=None)
        # tqdm takes increments; callers report absolute values.
        self.bar.update(value - self.last_value)
        self.last_value = value


def budgetary_consistency(args):
    """Run the budgetary consistency analysis and export the result as CSV."""
    ds = dataset.budgetary.load_from_csv(args.fname_in)
    dsc = ds.analysis_consistency(ProgressWorker(), None)
    variant = dsc._get_export_variant(args.export_variant)
    dsc.export(args.fname_out, '*.csv', variant, ProgressWorker())


def estimate(args):
    """Estimate the selected model(s) on the input dataset and export as CSV."""
    rows = load_raw_csv(args.fname_in)
    # Column mapping: subject, menu, (no default), choice; skip header row.
    ds = ExperimentalData.from_csv('dataset', rows[1:], (0, 1, None, 2))
    AVAILABLE_MODELS = [
        preorder(strict=True, total=True),
        preorder(strict=False, total=True),
        unattractive(strict=True, total=True),
        unattractive(strict=False, total=True),
        preorder(strict=True, total=False),
        preorder(strict=False, total=False),
        UndominatedChoice(strict=True),
        UndominatedChoice(strict=False),
        PartiallyDominantChoice(fc=True),
        PartiallyDominantChoice(fc=False),
        Overload(PreorderParams(strict=True, total=True)),
        Overload(PreorderParams(strict=False, total=True)),
        StatusQuoUndominatedChoice(),
        TopTwo(),
        SequentiallyRationalizableChoice(),
    ]
    if not args.models:
        print('Please specify a model using -m:')
        for m in AVAILABLE_MODELS:
            print(' ' + str(m))
        sys.exit(1)
    # BUGFIX: args.models comes from nargs='+' and is therefore a LIST;
    # the original `args.models == 'all'` comparison could never be true,
    # so the "all" shortcut silently selected no models.
    if 'all' in args.models:
        models = AVAILABLE_MODELS
    else:
        models = [ m for m in AVAILABLE_MODELS if str(m) in args.models ]
    dsm = ds.analysis_estimation(ProgressWorker(), EstimationOpts(
        models=models,
        disable_parallelism=args.sequential,
    ))
    variant = dsm._get_export_variant(args.export_variant)
    dsm.export(args.fname_out, '*.csv', variant, MockWorker())


def main(args):
    """Dispatch to the subcommand handler."""
    if args.action == 'estimate':
        estimate(args)
    elif args.action == 'budgetary':
        budgetary_consistency(args)
    else:
        raise Exception(f'unknown action: {args.action}')


if __name__ == '__main__':
    ap = argparse.ArgumentParser()
    sub = ap.add_subparsers(dest='action', help='subcommands')
    sub.required = True

    apE = sub.add_parser('estimate', help='model estimation')
    apE.add_argument('fname_in', metavar='input.csv')
    apE.add_argument('fname_out', metavar='output.csv')
    apE.add_argument('-e', dest='export_variant',
        default='compact (human-friendly)',
        help='export variant [%(default)s]',
    )
    apE.add_argument('-s', '--sequential', default=False, action='store_true',
        help='disable parallelism')
    apE.add_argument('-m', dest='models', metavar='MODEL', nargs='+',
        help='model(s)')

    apB = sub.add_parser('budgetary', help='budgetary consistency')
    apB.add_argument('fname_in', metavar='input.csv')
    apB.add_argument('fname_out', metavar='output.csv')
    apB.add_argument('-e', dest='export_variant',
        default='Summary',
        help='export variant [%(default)s]',
    )

    main(ap.parse_args())
3,672
1,213
# file KML.py # "Produces a kml file from the track as defined in ModuleConstructor.Track." # Strategy here is to produce two .kml files, one that references # google.com and one that references acserver.raf.ucar.edu, the latter # for use on the aircraft to avoid remote connections to google.com # in flight. The latter is named PlanAC.kml, the former Plan.kml. # # This is awkward code that writes many things repeatedly where I'm sure # there is an efficient way to do this. Someday should clean this up -- # but it works, so leave it for now. It was copied from a Google-Earth- # constructed representation of the track, so I'm just taking all the # kml that was in that file and duplicating it without understanding what # I'm doing... import Specs WaypointNumber = 0 KMLFileName = 'Plan.kml' lonx = Specs.TakeoffLocation()[0] latx = Specs.TakeoffLocation()[1] galtx = Specs.TakeoffLocation()[2] # header info for .kml file def KMLHeader(KMLFileName): "Opens the file and writes the required header." # XXXX fix this global WaypointNumber # changed here so needs to be global KMLACFileName = KMLFileName.replace ('Plan', 'PlanAC') print 'kml file name: ', KMLFileName, ', new name is: ', KMLACFileName KMLFile = open(KMLFileName,'w') KMLACFile = open(KMLACFileName,'w') KMLFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") KMLFile.write("<kml xmlns=\"http://earth.google.com/kml/2.2\">\n") KMLFile.write("<Document>\n") # might need to replace .kml with .kmz here? 
KMLFile.write("\t <name>"+KMLFileName+"</name>\n") KMLFile.write("\t<StyleMap id=\"msn_triangle_copy1\">\n") KMLFile.write("\t\t<Pair>\n") KMLFile.write("\t\t\t<key>normal</key>\n") KMLFile.write("\t\t\t<styleUrl>#sn_triangle_copy1"\ + "</styleUrl>\n") KMLFile.write("\t\t</Pair>\n") KMLFile.write("\t\t<Pair>\n") KMLFile.write("\t\t\t<key>highlight</key>\n") KMLFile.write("\t\t\t<styleUrl>#sh_triangle_copy1"\ +"</styleUrl>\n") KMLFile.write("\t\t</Pair>\n") KMLFile.write("\t</StyleMap>\n") KMLFile.write("\t <Style id=\"sh_triangle_copy1\">\n") KMLFile.write("\t\t <IconStyle>\n") KMLFile.write("\t\t\t <color>ff0000ff</color>\n") KMLFile.write("\t\t\t <scale>0.8</scale>\n") KMLFile.write("\t\t\t <Icon>\n") # KMLFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/triangle.png</href>\n") KMLFile.write("\t\t\t\t <href>http://maps.google.com/mapfiles/kml/shapes/placemark_square.png</href>\n") KMLFile.write("\t\t\t </Icon>\n") KMLFile.write("\t\t </IconStyle>\n") KMLFile.write("\t\t <LabelStyle>\n") KMLFile.write("\t\t\t <color>ff0000ff</color>\n") KMLFile.write("\t\t </LabelStyle>\n") KMLFile.write("\t\t <LineStyle>\n") KMLFile.write("\t\t\t <color>ff00aaff</color>\n") KMLFile.write("\t\t\t <width>2</width>\n") KMLFile.write("\t\t </LineStyle>\n") KMLFile.write("\t\t <ListStyle>\n") KMLFile.write("\t\t </ListStyle>\n") KMLFile.write("\t </Style>\n") KMLFile.write("\t <Style id=\"sn_triangle_copy1\">\n") KMLFile.write("\t\t <IconStyle>\n") KMLFile.write("\t\t\t <color>ff0000ff</color>\n") KMLFile.write("\t\t\t <scale>0.8</scale>\n") KMLFile.write("\t\t\t <Icon>\n") # KMLFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/triangle.png</href>\n") KMLFile.write("\t\t\t\t <href>http://maps.google.com/mapfiles/kml/shapes/placemark_square.png</href>\n") KMLFile.write("\t\t\t </Icon>\n") KMLFile.write("\t\t </IconStyle>\n") KMLFile.write("\t\t <LabelStyle>\n") KMLFile.write("\t\t\t <color>ff0000ff</color>\n") KMLFile.write("\t\t 
</LabelStyle>\n") KMLFile.write("\t\t <LineStyle>\n") KMLFile.write("\t\t\t <color>ff00aaff</color>\n") KMLFile.write("\t\t\t <width>2</width>\n") KMLFile.write("\t\t </LineStyle>\n") KMLFile.write("\t\t <ListStyle>\n") KMLFile.write("\t\t </ListStyle>\n") KMLFile.write("\t </Style>\n") KMLACFile.write("<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n") KMLACFile.write("<kml xmlns=\"http://earth.google.com/kml/2.2\">\n") KMLACFile.write("<Document>\n") # might need to replace .kml with .kmz here? KMLACFile.write("\t <name>"+KMLACFileName+"</name>\n") KMLACFile.write("\t<StyleMap id=\"msn_triangle_copy1\">\n") KMLACFile.write("\t\t<Pair>\n") KMLACFile.write("\t\t\t<key>normal</key>\n") KMLACFile.write("\t\t\t<styleUrl>#sn_triangle_copy1"\ + "</styleUrl>\n") KMLACFile.write("\t\t</Pair>\n") KMLACFile.write("\t\t<Pair>\n") KMLACFile.write("\t\t\t<key>highlight</key>\n") KMLACFile.write("\t\t\t<styleUrl>#sh_triangle_copy1"\ +"</styleUrl>\n") KMLACFile.write("\t\t</Pair>\n") KMLACFile.write("\t</StyleMap>\n") KMLACFile.write("\t <Style id=\"sh_triangle_copy1\">\n") KMLACFile.write("\t\t <IconStyle>\n") KMLACFile.write("\t\t\t <color>ff0000ff</color>\n") KMLACFile.write("\t\t\t <scale>0.8</scale>\n") KMLACFile.write("\t\t\t <Icon>\n") # KMLACFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/triangle.png</href>\n") KMLACFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/placemark_square.png</href>\n") KMLACFile.write("\t\t\t </Icon>\n") KMLACFile.write("\t\t </IconStyle>\n") KMLACFile.write("\t\t <LabelStyle>\n") KMLACFile.write("\t\t\t <color>ff0000ff</color>\n") KMLACFile.write("\t\t </LabelStyle>\n") KMLACFile.write("\t\t <LineStyle>\n") KMLACFile.write("\t\t\t <color>ff00aaff</color>\n") KMLACFile.write("\t\t\t <width>2</width>\n") KMLACFile.write("\t\t </LineStyle>\n") KMLACFile.write("\t\t <ListStyle>\n") KMLACFile.write("\t\t </ListStyle>\n") KMLACFile.write("\t </Style>\n") KMLACFile.write("\t <Style 
id=\"sn_triangle_copy1\">\n") KMLACFile.write("\t\t <IconStyle>\n") KMLACFile.write("\t\t\t <color>ff0000ff</color>\n") KMLACFile.write("\t\t\t <scale>0.8</scale>\n") KMLACFile.write("\t\t\t <Icon>\n") KMLACFile.write("\t\t\t\t <href>http://acserver.raf.ucar.edu/flight_data/display/placemark_square.png</href>\n") # KMLACFile.write("\t\t\t\t <href>http://maps.google.com/mapfiles/kml/shapes/triangle.png</href>\n") KMLACFile.write("\t\t\t </Icon>\n") KMLACFile.write("\t\t </IconStyle>\n") KMLACFile.write("\t\t <LabelStyle>\n") KMLACFile.write("\t\t\t <color>ff0000ff</color>\n") KMLACFile.write("\t\t </LabelStyle>\n") KMLACFile.write("\t\t <LineStyle>\n") KMLACFile.write("\t\t\t <color>ff00aaff</color>\n") KMLACFile.write("\t\t\t <width>2</width>\n") KMLACFile.write("\t\t </LineStyle>\n") KMLACFile.write("\t\t <ListStyle>\n") KMLACFile.write("\t\t </ListStyle>\n") KMLACFile.write("\t </Style>\n") WaypointNumber = 0 return(KMLFile, KMLACFile) def KMLclose(KMLFile, KMLACFile): "Adds trailer to the .kml file and then closes it." 
KMLFile.write("</Document>\n") KMLFile.write("</kml>\n") KMLFile.close() KMLACFile.write("</Document>\n") KMLACFile.write("</kml>\n") KMLACFile.close() def PlotPoints (KMLFile, KMLACFile, points): "Plot the set of points on the .kml file" KMLFile.write("\t <Placemark>\n") KMLFile.write("\t\t <styleUrl>#msn_triangle_copy1</styleUrl>\n") KMLFile.write("\t\t <LineString>\n") KMLFile.write("\t\t\t <tessellate>1</tessellate>\n") KMLFile.write("\t\t\t <coordinates>\n") for x in points: KMLFile.write ("\t\t\t\t " + format (x[0], 'f') + ','\ + format (x[1], 'f') + ','\ + format (x[2], 'f') + ' \n') KMLFile.write("\t\t\t </coordinates>\n") KMLFile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n") KMLFile.write("\t\t </LineString>\n") KMLFile.write("\t </Placemark>\n") KMLACFile.write("\t <Placemark>\n") KMLACFile.write("\t\t <styleUrl>#sh_triangle_copy1</styleUrl>\n") KMLACFile.write("\t\t <LineString>\n") KMLACFile.write("\t\t\t <tessellate>1</tessellate>\n") KMLACFile.write("\t\t\t <coordinates>\n") for x in points: KMLACFile.write ("\t\t\t\t " + format (x[0], 'f') + ','\ + format (x[1], 'f') + ','\ + format (x[2], 'f') + ' \n') KMLACFile.write("\t\t\t </coordinates>\n") KMLACFile.write("\t\t </LineString>\n") KMLACFile.write("\t </Placemark>\n") def PlotWaypoint (KMLFile, KMLACFile, wp, label='', symbol = 'triangle'): "Adds waypoint symbol to the .kml file for plotting on Google Earth etc." # Copy from a Google-Earth-generated example # (I don't understand all this; it's just copied verbatim here. # It's likely this could be made more compact.) global WaypointNumber, lonx, latx, galtx # These are global because they are saved in order to # draw lines from the last point to this one. 
longitude = wp[0] latitude = wp[1] altitude = wp[2] WaypointNumber += 1 if (label == ''): label="WP"+format(WaypointNumber,'d') KMLFile.write("\t <Placemark>\n") KMLFile.write("\t\t <name>"+label+"</name>\n") KMLFile.write("\t\t <description>WayPoint "\ +format(round(altitude/(100))*100.,'.0f')+' ft'+"</description>\n") KMLFile.write("\t\t <styleUrl>#msn_triangle_copy1"\ +"</styleUrl>\n") KMLFile.write("\t\t <Point>\n") KMLFile.write("\t\t\t <coordinates>"+format(longitude,'f')\ +','+format(latitude,'f')+','+format(altitude,'f')+"</coordinates>\n") KMLFile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n") KMLFile.write("\t\t </Point>\n") KMLFile.write("\t </Placemark>\n") KMLFile.write("\t <Placemark>\n") KMLFile.write("\t\t <name>"+"Path"+format(WaypointNumber,'d')+"</name>\n") KMLFile.write("\t\t <styleUrl>#msn_triangle_copy1</styleUrl>\n") KMLFile.write("\t\t <LineString>\n") KMLFile.write("\t\t\t <tessellate>1</tessellate>\n") KMLFile.write("\t\t\t <coordinates>\n") KMLFile.write("\t\t\t\t "+format(lonx,'f')+','+format(latx,'f')+','\ +format(galtx,'f')+' '+format(longitude,'f')+','\ +format(latitude,'f')+','+format(altitude,'f')+'\n') # print 'Waypoint'+format(WaypointNumber,'d')+' '+format(longitude, '.2f')\ # +','+format(latitude, '.2f')+',' + format(round(altitude/100.)*100., '.0f') KMLFile.write("\t\t\t </coordinates>\n") KMLFile.write("\t\t\t <altitudeMode>absolute</altitudeMode>\n") KMLFile.write("\t\t </LineString>\n") KMLFile.write("\t </Placemark>\n") KMLACFile.write("\t <Placemark>\n") KMLACFile.write("\t\t <name>"+label+"</name>\n") KMLACFile.write("\t\t <description>WayPoint "\ +format(round(altitude/(100))*100.,'.0f')+' ft'+"</description>\n") KMLACFile.write("\t\t <styleUrl>#msn_triangle_copy1"\ +"</styleUrl>\n") KMLACFile.write("\t\t <Point>\n") KMLACFile.write("\t\t\t <coordinates>"+format(longitude,'f')\ +','+format(latitude,'f')+','+format(altitude,'f')+"</coordinates>\n") KMLACFile.write("\t\t </Point>\n") KMLACFile.write("\t 
</Placemark>\n") KMLACFile.write("\t <Placemark>\n") KMLACFile.write("\t\t <name>"+"Path"+format(WaypointNumber,'d')+"</name>\n") KMLACFile.write("\t\t <styleUrl>#msn_triangle_copy1</styleUrl>\n") KMLACFile.write("\t\t <LineString>\n") KMLACFile.write("\t\t\t <tessellate>1</tessellate>\n") KMLACFile.write("\t\t\t <coordinates>\n") KMLACFile.write("\t\t\t\t "+format(lonx,'f')+','+format(latx,'f')+','\ +format(galtx,'f')+' '+format(longitude,'f')+','\ +format(latitude,'f')+','+format(altitude,'f')+'\n') # print 'Waypoint'+format(WaypointNumber,'d')+' '+format(longitude, '.2f')\ # +','+format(latitude, '.2f')+',' + format(round(altitude/100.)*100., '.0f') KMLACFile.write("\t\t\t </coordinates>\n") KMLACFile.write("\t\t </LineString>\n") KMLACFile.write("\t </Placemark>\n") lonx = longitude latx = latitude galtx = altitude return()
12,156
4,937
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Functions to make API calls.

@author: amagrabi

"""

import requests


def login(client_id, client_secret, project_key, scope, host = 'EU'):
    '''Authentification

    Args:
        client_id: client_id.
        client_secret: client_secret.
        project_key: project_key.
        scope: Scope of access (read, write, etc.).
        host: 'EU' or 'US'.

    Returns:
        Authentification data.

    Raises:
        Exception: on an unknown host or a non-200 token response.
    '''
    # NOTE(review): project_key is accepted but never used here — confirm
    # whether it should be part of the requested scope.
    headers = {
        'Content-Type' : 'application/x-www-form-urlencoded'
    }
    body = "grant_type=client_credentials&scope=%s" % scope
    if host == 'EU':
        url = "https://auth.sphere.io/oauth/token"
    elif host == 'US':
        url = "https://auth.commercetools.co/oauth/token"
    else:
        raise Exception("Host is unknown (has to be 'EU' or 'US').")
    auth = (client_id, client_secret)
    r = requests.post(url, data=body, headers=headers, auth=auth)
    # BUGFIX: the original used `r.status_code is 200` — an identity test
    # that only works because CPython caches small ints; equality is the
    # correct comparison.
    if r.status_code == 200:
        return r.json()
    else:
        raise Exception("Failed to get an access token. Are you sure you have added them to config.py?")


def query(endpoint, project_key, auth, host = 'EU'):
    '''Fetch Data via API into Json-Format

    Args:
        endpoint: API endpoint (products, orders, etc.).
        project_key: project_key.
        auth: Login data (dict containing 'access_token').
        host: 'EU' or 'US'.

    Returns:
        Query output in json.
    '''
    headers = {
        "Authorization" : "Bearer %s" % auth["access_token"]
    }
    if host == 'EU':
        url = "https://api.sphere.io/%s/%s" % (project_key, endpoint)
    elif host == 'US':
        url = "https://api.commercetools.co/%s/%s" % (project_key, endpoint)
    else:
        raise Exception("Host is unknown (has to be 'EU' or 'US').")
    r = requests.get(url, headers=headers)
    data_json = r.json()  # json-format as nested dict-/list-structure
    return data_json
1,932
639
import sys, re, string, numpy

# Generate one Church model file per (p-strong, p-mod, cost) combination:
# each output starts with the probability/cost defines and is followed by
# the template program given as sys.argv[1].

p_strongs = numpy.arange(0.1, 0.9, 0.1)
costs = range(3, 10, 1)

# Read the template once instead of re-opening it on every iteration.
with open(sys.argv[1], "r") as f:
    template = f.read()

for p_s in p_strongs:
    p_meds = numpy.arange(0.1, 1 - p_s, 0.1)
    for p_m in p_meds:
        p_w = 1 - p_s - p_m
        for cost in costs:
            filename = str(p_s) + "_" + str(p_m) + "_" + str(p_w) + "_" + str(cost) + ".church"
            # with-block fixes the original's leak: wF was never closed,
            # so the last files could be left unflushed.
            with open("model_fits/" + filename, "w") as wF:
                wF.write("(define p-strong " + str(p_s) + ")\n" +
                         "(define p-mod " + str(p_m) + ")\n" +
                         "(define p-weak " + str(p_w) + ")\n" +
                         "(define cost " + str(cost) + ")\n")
                wF.write(template)
769
338
import boto3 as aws
import botocore
from shuttl import app


## Class for AWS S3 storage.
#  All state is class-level: one shared bucket handle and one s3 resource.
class Storage:
    bucket = None  ##< the bucket the file belongs to
    s3 = aws.resource("s3")  ##< The s3 instance

    @classmethod
    def GetBucket(cls, bucketName):
        ## Bind cls.bucket to the named bucket; best-effort (missing
        #  credentials are silently ignored, matching the other methods).
        try:
            cls.bucket = cls.s3.Bucket(bucketName)
        except botocore.exceptions.NoCredentialsError:
            pass

    @classmethod
    def Upload(cls, fileObj):
        ## Upload fileObj.filePath to S3 under the same key; no-op in tests.
        if app.config["TESTING"]:
            return
        if cls.bucket is None:
            cls.GetBucket("shuttl.io")
        try:
            return cls.bucket.upload_file(fileObj.filePath, fileObj.filePath)
        except botocore.exceptions.NoCredentialsError:
            pass

    @classmethod
    def Delete(cls, fileObj, bucketName="shuttl.io"):
        ## Delete the object for fileObj.filePath; no-op in tests.
        if app.config["TESTING"]:
            return
        try:
            obj = cls.s3.Object(bucketName, fileObj.filePath)
            return obj.delete()
        # BUGFIX: the original used `except A, B:` — Python-2 syntax that is
        # a SyntaxError on Python 3 and, on Python 2, binds the ClientError
        # instance to the name NoCredentialsError instead of catching both.
        # A tuple catches both exception types as intended.
        except (botocore.exceptions.ClientError,
                botocore.exceptions.NoCredentialsError):
            pass

    @classmethod
    def Download(cls, fileObj, bucketName="shuttl.io"):
        ## Download the object to fileObj.filePath; no-op in tests.
        #  Raises FileNotFoundError when the object does not exist.
        if app.config["TESTING"]:
            return
        try:
            obj = cls.s3.Object(bucketName, fileObj.filePath)
            return obj.download_file(fileObj.filePath)
        except botocore.exceptions.ClientError:
            raise FileNotFoundError("No such file or directory: {}".format(fileObj.filePath))
        except botocore.exceptions.NoCredentialsError:
            pass
1,625
479
#!/usr/bin/python # # This script is used to analyze, tabulate, and graph data generated by # the JAM weaver and by JAMScript performance instrumentation. It was # used to produce figures presented in the experimental results section # of ``Efficient Runtime Enforcement Techniques for Policy Weaving,'' # published at FSE 2014. # import sys MAJOR = sys.version_info[0] import os import re import subprocess from subprocess import PIPE import shutil import time import imp from optparse import OptionParser #import warnings import grapher from resultsutil import AppStats, SourceVariant, Action, Section def collect_results_from_file(filepath, results): lns = cfg.get_lines(filepath) # Detect whether the file contains any results via the format. # %%% Does this short-circuit when it finds a match? isresults = any([True for ln in lns if ln.startswith(cfg.PROFILE_MARKER)]) if isresults: results.extend(lns) def collect_results(filelist): lines = [] for respath in filelist: assert os.path.exists(respath), "Results source does not exist: %s" % respath collect_results_from_file(respath, lines) if len(lines) == 0: cfg.warn("No results found") return lines #/collect_results def collect_separate_results(filelist): results = [] for respath in filelist: lines = [] assert os.path.exists(respath), "Results source does not exist: %s" % respath collect_results_from_file(respath, lines) if len(lines) == 0: cfg.warn("No results found for file: %s" % respath) results.append(lines) return results #/collect_separate_results def parse_profile_header(ln): assert ln.startswith(cfg.PROFILE_MARKER) action = ln[len(cfg.PROFILE_MARKER):-len(cfg.PROFILE_MARKER_TAIL)] if len(action) < 1: cfg.err("Action name is empty: %s" % ln) return action #/parse_profile_header def parse_section_header(ln): assert ln.startswith(cfg.SECTION_MARKER) info = None section = ln[len(cfg.SECTION_MARKER):-len(cfg.SECTION_MARKER)] if section.startswith(cfg.TIME_SECTION_NAME): info = section[len(cfg.TIME_SECTION_NAME):] info = 
info.lstrip(": ") section = cfg.TIME_SECTION_NAME if len(section) < 1: cfg.err("Section name is empty: %s" % ln) return section, info #/parse_section_header def parse_stack(lines): appinfo = None stackinfo = [] # Will contain (file, lineno) pairs in order. for ln in lines: parts = ln.split(None) # This can happen for unknown stack frames. if len(parts) < 3: continue flln = parts[2] srcfl, lineno = flln.rsplit(':', 1) stackinfo.append((srcfl, lineno)) for i in range(0, len(stackinfo)): idx = len(stackinfo) - 1 - i lastsrc = stackinfo[idx][0] if lastsrc.startswith('http://'): lastsrc = lastsrc[7:] # Get the init time. # %%% Ugly string matching if lastsrc.startswith(cfg.TEST_DIR): lastsrc = lastsrc[len(cfg.TEST_DIR):] for marker in ['/test.php?script=', '/test.php?sources[]=']: begin = -1 begin = lastsrc.find(marker) if begin > -1: begin += len(marker) lastsrc = lastsrc[begin:] end = lastsrc.find("&policy=") if end > -1: lastsrc = lastsrc[:end] break appinfo = cfg.get_file_info(lastsrc) if appinfo['app'] != 'libTx' and appinfo['app'] != 'auto' and appinfo['app'] != 'autoextra': break return appinfo, stackinfo #/parse_stack def process_data(appstats, variant, action, section, dataline): dataparts = dataline.split('/') # This method assumes the first data fed to it has column headers. 
section.addData(dataparts) #/process_data def parse_results(lines): curAppStats = None curVariant = None curActionDesc = None curAction = None curSection = None curBig = False stats = {} idxes = iter(range(0, len(lines))) for idx in idxes: ln = lines[idx].strip() if ln == '': continue isjunk = False for junk in cfg.JUNK_MARKERS: if ln.startswith(junk): isjunk = True if isjunk: continue if ln.startswith(cfg.ERROR_MARKER): errtxt = ln[len(cfg.ERROR_MARKER):] if curAction is not None: curAction.addError(errtxt) cfg.err("Error inkey action %s, variant %s, app %s: %s" % (curAction.description, curVariant.descriptor(), curAppStats.name, errtxt)) elif curVariant is not None: curVariant.addError(errtxt) cfg.err("Error in variant %s, app %s: %s" % (curVariant.descriptor(), curAppStats.name, errtxt)) elif curAppStats is not None: curAppStats.addError(errtxt) cfg.err("Error in app %s: %s" % (curAppStats.name, errtxt)) else: cfg.err("Unassociated error: %s" % errtxt) continue if ln.startswith(cfg.PROFILE_MARKER): curActionDesc = parse_profile_header(ln) elif ln.startswith(cfg.SECTION_MARKER): sect, sectinfo = parse_section_header(ln) curSection = Section(sect, sectinfo) assert curAction is not None curAction.addSection(curSection, cfg.TIME_SECTION_NAME) elif ln.startswith(cfg.STACK_MARKER): stack = [ln] # Look ahead to get whole stack. nextidx = idx + 1 while True: try: nextln = lines[nextidx] if nextln.startswith(cfg.STACK_MARKER): stack.append(nextln) next(idxes) # Exhaust the line from the iterator. nextidx += 1 else: break except Exception as e: cfg.err('While parsing stack: %s' % str(e)) break appinfo, stackInfo = parse_stack(stack) # Generate or retrieve the AppStats object. appname = appinfo['app'] # Parse the body HTML to determine whether it's a "big" test case. # %%% Yikes! 
if curActionDesc == 'init' and appname.startswith(cfg.SMS2PREFIX): if ln.find('.big.body.html') > -1 or appname.endswith('.big'): curBig = True else: curBig = False appkey = appname if appkey.startswith(cfg.SMS2PREFIX): if appkey.endswith('.big') or appkey.endswith('-big'): appkey = appkey[:-4] if appkey.endswith('-newcall'): appkey = appkey[:-8] elif appkey == 'jsqrcode-call': appkey = 'jsqrcode' if appkey in stats: curAppStats = stats[appkey] else: curAppStats = AppStats(appname) stats[appkey] = curAppStats descparts = appinfo['desc'] # "profile" is expected for all of these data. try: descparts.remove('profile') except: cfg.err('No "profile" component of variant description: %r' % appinfo) curVariant = curAppStats.getVariant(descparts) # The app/variant info is assumed to come right after the action. if appname.startswith(cfg.SMS2PREFIX) and curActionDesc == "compute": if curBig: curActionDesc = "bigcompute" curAction = curVariant.getAction(curActionDesc, stackInfo) else: assert curSection is not None, "No section info: %s" % ln process_data(curAppStats, curVariant, curAction, curSection, ln) return stats #/parse_results def print_all_times(stats): # Print the time stats for each app. for app, stat in stats.items(): cfg.out(stat.name) for i in stat.variants: cfg.out(str(stat.variants[i])) def print_times(app, actdesc, timemap): out = '%s/%s' % (app, actdesc) for v, t in timemap.items(): out += ' %s:%s' % (v, str(t)) cfg.out(out) def compare_sections(sect0, sect1, action, variant, app): allsame = True section = sect0.name # Compare keys for the two dicts. 
keys0 = set(sect0.rows.keys()) keys1 = set(sect1.rows.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Row '%s' not found in 2nd section '%s', action '%s', variant '%s' for app '%s'" % (a0, section, action, variant, app)) allsame = False for a1 in addl1: cfg.err("Row '%s' not found in 2nd section '%s', action '%s', variant '%s' for app '%s'" % (a1, section, action, variant, app)) allsame = False common = keys0 & keys1 for rowdesc in common: rowdata0 = sect0.rows[rowdesc] rowdata1 = sect1.rows[rowdesc] if rowdata0 != rowdata1: cfg.err("Data is inconsistent: %r != %r in section '%s', action '%s', variant '%s', app '%s'" % (rowdata0, rowdata1, section, action, variant, app)) allsame = False return allsame #/compare_sections def compare_actions(act0, act1, variant, app): allsame = True action = act0.description # Compare keys for the two dicts. keys0 = set(act0.sections.keys()) keys1 = set(act1.sections.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Section '%s' not found for 2nd action '%s' in variant '%s' for app '%s'" % (a0, action, variant, app)) allsame = False for a1 in addl1: cfg.err("Section '%s' not found for 1st action '%s' in variant '%s' for app '%s'" % (a1, action, variant, app)) allsame = False common = keys0 & keys1 for sectdesc in common: sectlist0 = act0.sections[sectdesc] sectlist1 = act1.sections[sectdesc] for sect0 in sectlist0: for sect1 in sectlist1: if not compare_sections(sect0, sect1, action, variant, app): allsame = False return allsame #/compare_actions def compare_variants(var0, var1, app): allsame = True variant = var0.descriptor() # Compare keys for the two dicts. 
keys0 = set(var0.actions.keys()) keys1 = set(var1.actions.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Action '%s' not found in 2nd variant '%s' for app '%s'" % (a0, variant, app)) allsame = False for a1 in addl1: cfg.err("Action '%s' not found in 1st variant '%s' for app '%s'" % (a1, variant, app)) allsame = False common = keys0 & keys1 for actdesc in common: act0 = var0.actions[actdesc] act1 = var1.actions[actdesc] if not compare_actions(act0, act1, variant, app): allsame = False return allsame def compare_stats(statsobj0, statsobj1): allsame = True app = statsobj0.name # Compare keys for the two dicts. keys0 = set(statsobj0.variants.keys()) keys1 = set(statsobj1.variants.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Variant '%s' not found in 2nd variant list for app '%s'" % (a0, app)) allsame = False for a1 in addl1: cfg.err("Variant '%s' not found in 1st variant list for app '%s'" % (a1, app)) allsame = False common = keys0 & keys1 for vardesc in common: variant0 = statsobj0.variants[vardesc] variant1 = statsobj1.variants[vardesc] if not compare_variants(variant0, variant1, app): allsame = False return allsame #/compare_stats def compare_results(stats0, stats1): allsame = True # Compare keys for the two dicts. 
keys0 = set(stats0.keys()) keys1 = set(stats1.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Application '%s' not found in 2nd stats list" % a0) allsame = False for a1 in addl1: cfg.err("Application '%s' not found in 1st stats list" % a1) allsame = False common = keys0 & keys1 for app in common: stats0obj = stats0[app] stats1obj = stats1[app] if not compare_stats(stats0obj, stats1obj): allsame = False if allsame: cfg.out("Statistics match exactly") #/compare_results def compare_actions_times(act0, act1, variant, app): allsame = True action = act0.description t0 = act0.avg_time() t1 = act1.avg_time() diff = t0 - t1 if diff < 0.0: fast = -1 faststr = '0 is faster' diff = -diff ratio = t0 / t1 elif diff > 0.0: fast = 1 faststr = '1 is faster' ratio = t1 / t0 else: fast = 0 faststr = 'Same time' ratio = 1.0 cfg.out("%s for app '%s', variant '%s', action '%s', difference: %.2f, ratio: %.2f" % (faststr, app, variant, action, diff, ratio)) return (fast, diff, ratio) #/compare_actions_times def compare_variants_times(var0, var1, app): allsame = True variant = var0.descriptor() # Compare keys for the two dicts. keys0 = set(var0.actions.keys()) keys1 = set(var1.actions.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Action '%s' not found in 2nd variant '%s' for app '%s'" % (a0, variant, app)) for a1 in addl1: cfg.err("Action '%s' not found in 1st variant '%s' for app '%s'" % (a1, variant, app)) common = keys0 & keys1 for actdesc in common: act0 = var0.actions[actdesc] act1 = var1.actions[actdesc] fast, diff, ratio = compare_actions_times(act0, act1, variant, app) # %%% Do something with these #/compare_variants_times def compare_stats_times(statsobj0, statsobj1): app = statsobj0.name # Compare keys for the two dicts. 
keys0 = set(statsobj0.variants.keys()) keys1 = set(statsobj1.variants.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Variant '%s' not found in 2nd variant list for app '%s'" % (a0, app)) for a1 in addl1: cfg.err("Variant '%s' not found in 1st variant list for app '%s'" % (a1, app)) common = keys0 & keys1 for vardesc in common: variant0 = statsobj0.variants[vardesc] variant1 = statsobj1.variants[vardesc] compare_variants_times(variant0, variant1, app) #/compare_stats_times def compare_times(stats0, stats1): # Compare keys for the two dicts. keys0 = set(stats0.keys()) keys1 = set(stats1.keys()) addl0 = keys0 - keys1 addl1 = keys1 - keys0 for a0 in addl0: cfg.err("Application '%s' not found in 2nd stats list" % a0) for a1 in addl1: cfg.err("Application '%s' not found in 1st stats list" % a1) common = keys0 & keys1 for app in common: stats0obj = stats0[app] stats1obj = stats1[app] compare_stats_times(stats0obj, stats1obj) #/compare_times # Get a list of actions recorded in the given |AppStats| object. def load_actions(stat): actions = [] for vardesc in stat.variants: for actdesc in stat.variants[vardesc].actions: if actdesc not in actions: actions.append(actdesc) return actions # Get a filtered list of variants for the given application/action. # An exception may be thrown if some variants are not available. 
def load_variants(stat, actdesc):
    """Return {vardesc: variant} for every variant in cfg.VARIANTS.

    Raises Exception if any configured variant is missing from `stat`, or if
    a present variant has no data for `actdesc`.
    """
    app = stat.name
    variants = {}
    for vardesc in cfg.VARIANTS:
        if vardesc in stat.variants:
            variant = stat.variants[vardesc]
            if actdesc not in variant.actions:
                raise Exception("Data not available for action: %s %s %s" % (app, vardesc, actdesc))
            variants[vardesc] = variant
        else:
            raise Exception("Variant data not available: %s %s" % (app, vardesc))
    assert len(variants) == len(cfg.VARIANTS)
    return variants


def print_time_comparison(stats):
    """Print per-action average times for every application in `stats`."""
    # BUG FIX: the original did `apps = stats.keys(); apps.sort()`, which fails
    # on Python 3 (dict_keys has no .sort()); sorted() works on both 2 and 3.
    for app in sorted(stats):
        stat = stats[app]
        actdescs = load_actions(stat)
        for actdesc in actdescs:
            try:
                variants = load_variants(stat, actdesc)
                times = {}
                for vardesc, variant in variants.items():
                    times[vardesc] = variant.actions[actdesc].avg_time()
                print_times(app, actdesc, times)
            except Exception as e:
                # BUG FIX: the original omitted the '%' operator
                # ("'...' (app, actdesc, str(e))"), raising TypeError whenever
                # this handler ran.
                cfg.err('Time comparison for %s/%s: %s' % (app, actdesc, str(e)))


def updateMinMax(minmax, tm, sub, desc):
    """Fold time `tm` (labelled `desc`) into the running min/max entry minmax[sub]."""
    if tm < minmax[sub]['mintime']:
        minmax[sub]['mintime'] = tm
        minmax[sub]['minapp'] = desc
    if tm > minmax[sub]['maxtime']:
        minmax[sub]['maxtime'] = tm
        minmax[sub]['maxapp'] = desc


# Create graphs.
def generate_graphs(stats):
    """Collect per-variant times for every app/action, sanity-check them,
    report outliers, and hand the result to the grapher module."""
    timelist = []
    minmax = {
        'overall': {'mintime': float('inf'), 'maxtime': 0.0, 'minapp': None, 'maxapp': None},
        'init': {'mintime': float('inf'), 'maxtime': 0.0, 'minapp': None, 'maxapp': None},
    }
    for app in sorted(stats.keys()):
        if app in cfg.DISABLED:
            continue
        stat = stats[app]
        for actdesc in load_actions(stat):
            # Optionally group init and load times together.
            if cfg.INCLUDE_INIT and actdesc == 'init':
                continue
            if cfg.SUPPRESS_SMS2_LOAD and actdesc == 'load' and app.startswith(cfg.SMS2PREFIX):
                continue
            variants = load_variants(stat, actdesc)
            times = {'action': actdesc}
            for vardesc, variant in variants.items():
                tm = variant.actions[actdesc].avg_time()
                desc = app + '/' + actdesc + '/' + vardesc
                # Optionally add on policy.js and libTx.js load time.
                if cfg.INCLUDE_INIT:
                    if actdesc == 'load' and vardesc != 'input':
                        if 'init' in variant.actions:
                            inittm = variant.actions['init'].avg_time()
                            updateMinMax(minmax, inittm, 'init', desc)
                            tm += inittm
                        else:
                            cfg.warn("No init time for load: %s/%s" % (app, vardesc))
                    elif actdesc == 'init' and vardesc != 'input':
                        # BUG FIX: the original passed `inittm` here, which is
                        # unbound in this branch (only the 'load' branch sets it).
                        # The init action's own time is `tm`.
                        updateMinMax(minmax, tm, 'init', desc)
                times[vardesc] = tm
                updateMinMax(minmax, tm, 'overall', desc)
            # Check for zero/negative times.
            ok = True
            for vardesc, time in times.items():
                if vardesc == 'action':
                    continue
                if time <= 0:
                    cfg.err("NON-POSITIVE TIME: %s/%s/%s/%.2f" % (app, actdesc, vardesc, time))
                    ok = False
            if not ok:
                continue
            # Check for cases where woven performs worse than modular
            # on long-duration base case.
            time0 = float(times[cfg.VARIANTS[0]])
            time1 = float(times[cfg.VARIANTS[1]])
            time2 = float(times[cfg.VARIANTS[2]])
            disp0 = cfg.VARIANT_DISPLAY[0].upper()
            disp1 = cfg.VARIANT_DISPLAY[1].upper()
            disp2 = cfg.VARIANT_DISPLAY[2].upper()
            if time2 > time1 and time0 > 100.0:
                cfg.warn("LONG-DURATION OUTLIER: %s/%s/%.2f/%.2f/%.2f"
                         % (app, actdesc, time0, time1, time2))
            # BUG FIX: Python precedence made the original condition read as
            # `(actdesc != 'init' and time0 < 0.1) or time1 < 0.1 or time2 < 0.1`,
            # so init actions were warned about too; grouping the time checks
            # matches the apparent intent of skipping init.
            if actdesc != 'init' and (time0 < 0.1 or time1 < 0.1 or time2 < 0.1):
                cfg.warn("TINY TIME: %s/%s/%.2f/%.2f/%.2f"
                         % (app, actdesc, time0, time1, time2))
            # Check for cases where secure code is faster than unprotected.
            if actdesc != 'init' and time2 / time0 <= 0.90:
                cfg.warn("%s UNDERLIER: %s/%s %.2f" % (disp2, app, actdesc, time2 / time0))
            if actdesc != 'init' and time1 / time0 < 0.90:
                cfg.warn("%s UNDERLIER: %s/%s %.2f" % (disp1, app, actdesc, time1 / time0))
            if time2 / time1 > 1.5:
                cfg.warn("LARGE %s/%s RATIO: %s/%s/%.2f/%.2f"
                         % (disp2, disp1, app, actdesc, time2 / time1, time0))
            if actdesc != 'init' and actdesc != 'load' and time2 / time0 > 5:
                cfg.warn("LARGE %s/%s RATIO: %s/%s/%.2f"
                         % (disp2, disp0, app, actdesc, time2 / time0))
            timelist.append(times)
    grapher.modularVsWovenOverheadByOriginal(timelist, cfg.VARIANTS, cfg.VARIANT_DISPLAY)
    #grapher.modularVsWovenOverhead(timelist, False)
    grapher.modularVsWovenOverhead(timelist, True)
    grapher.wovenOverheadByOriginal(timelist, cfg.VARIANTS, cfg.VARIANT_DISPLAY)
    cfg.out("MIN INIT TIME: %s/%s" % (minmax['init']['mintime'], minmax['init']['minapp']))
    cfg.out("MAX INIT TIME: %s/%s" % (minmax['init']['maxtime'], minmax['init']['maxapp']))
    cfg.out("MIN ACTION TIME: %s/%s" % (minmax['overall']['mintime'], minmax['overall']['minapp']))
    cfg.out("MAX ACTION TIME: %s/%s" % (minmax['overall']['maxtime'], minmax['overall']['maxapp']))
#/generate_graphs


def generate_output(stats):
    """Entry point for single-results-set analysis: currently graphs only."""
    #print_all_times(stats)
    #print_time_comparison(stats)
    generate_graphs(stats)
#/generate_output


def main():
    """Parse options, load the cfg module, and dispatch the requested analyses."""
    parser = OptionParser(usage="%prog results.txt")
    parser.add_option('-c', '--config', action='store',
                      default=os.path.join(os.path.dirname(__file__), 'resultsconfig.py'),
                      dest='config', help='configuration.py file')
    parser.add_option('-v', '--verbose', action='store_true', default=False,
                      dest='verbose', help='generate verbose output')
    parser.add_option('-a', '--analysis', action='store', default='t', dest='analysis',
                      help='t: fine-grained vs. coarsed-grain runtime; '
                           'c: compare profile information across results files; '
                           'm: compare running time across results files; a: all')
    opts, args = parser.parse_args()
    #warnings.simplefilter('error', UserWarning)

    # The config module is loaded into a module-level global so every helper
    # above can reach it as `cfg`.
    # NOTE(review): `imp.load_source` is removed in Python 3.12 — if this
    # script must run on modern Python, migrate to importlib.util.
    global cfg
    cfg = imp.load_source("cfg", opts.config)
    global VERBOSE
    VERBOSE = opts.verbose

    analysis = opts.analysis
    if analysis not in ['a', 't', 'c', 'm']:
        parser.error("Invalid analysis identifier: %s" % analysis)

    # Collect the list of results files: either the configured default
    # directory or the paths given on the command line (dirs are expanded
    # one level, without recursing).
    if len(args) == 0:
        assert os.path.exists(cfg.RESULTS_SOURCE), \
            "Default results path %s doesn't exist." % cfg.RESULTS_SOURCE
        resultsfiles = [os.path.join(cfg.RESULTS_SOURCE, fl)
                        for fl in os.listdir(cfg.RESULTS_SOURCE)]
    else:
        resultsfiles = []
        for resfile in args:
            if not os.path.exists(resfile):
                cfg.warn("results file %s doesn't exist." % resfile)
            if os.path.isdir(resfile):
                # Doesn't recurse.
                resultsfiles.extend([os.path.join(resfile, filename)
                                     for filename in os.listdir(resfile)])
            else:
                resultsfiles.append(resfile)
    if len(resultsfiles) == 0:
        parser.error("No results files found")

    if analysis == 'a' or analysis == 't':
        resultstxt = collect_results(resultsfiles)
        stats = parse_results(resultstxt)
        generate_output(stats)
    if analysis in ['a', 'c', 'm']:
        # The cross-file comparisons need exactly two results sets.
        if len(resultsfiles) != 2:
            parser.error("Size of results list != 2")
        resultslist = collect_separate_results(resultsfiles)
        stats0 = parse_results(resultslist[0])
        stats1 = parse_results(resultslist[1])
        if analysis in ['a', 'c']:
            compare_results(stats0, stats1)
        if analysis in ['a', 'm']:
            compare_times(stats0, stats1)
#/main


if __name__ == "__main__":
    main()
21,822
7,820
# Copyright (c) 2011-2013 Peng Sun. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the COPYRIGHT file.

# hone_control.py
# A placeholder file for any control jobs that the HONE runtime generates.
245
80