import MenuMaker
from MenuMaker import indent, writeFullMenu

menuFile = "~/.config/openbox/menu.xml"


def _map(x):
    for d, s in (("&amp;", "&"), ("'", '"')):
        x = x.replace(s, d)
    return x


class Sep(object):
    def emit(self, level):
        return ['%s<separator/>' % indent(level)]


class App(object):
    def emit(self, level):
        x = indent(level)
        xx = indent(level + 1)
        cmd = self.app.execmd
        if self.app.terminal:
            cmd = MenuMaker.terminal.runCmd(cmd)
        return [
            '%s<item label="%s"> <action name="Execute">' % (x, _map(self.app.name)),
            '%s<execute>%s</execute>' % (xx, cmd),
            '%s</action> </item>' % x
        ]


class Menu(object):
    id = 0

    def __init__(self):
        super(Menu, self).__init__()
        self.id = Menu.id
        Menu.id += 1

    def emit(self, level):
        menu = ['%s<menu id="%s" label="%s">'
                % (indent(level), self.id, _map(self.name))]
        for x in self:
            menu += x.emit(level + 1)
        menu.append('%s</menu>' % indent(level))
        return menu


class Root(object):
    name = "OpenBox 3"

    def __init__(self, subs):
        if writeFullMenu:
            subs += [MenuMaker.Sep(), SysMenu()]
        subs += [MenuMaker.Sep(), Oblogout()]
        super(Root, self).__init__(subs)
        self.id = "root-menu"

    def emit(self, level):
        if writeFullMenu:
            menu = [
                '<?xml version="1.0" encoding="UTF-8"?>',
                '<openbox_menu>'
            ]
            menu += super(Root, self).emit(level + 1)
            menu.append('</openbox_menu>')
            return menu
        else:
            menu = []
            for x in self:
                menu += x.emit(level)
            return menu


class SysMenu(MenuMaker.Menu):
    name = "OpenBox"

    def __init__(self):
        subs = [
            X('<menu id="client-list-menu"/>'),
            X('<item label="Reconfigure"> <action name="Reconfigure"/> </item>'),
            MenuMaker.Sep(),
            X('<item label="Exit"> <action name="Exit"/> </item>')
        ]
        super(SysMenu, self).__init__(subs)
        self.align = MenuMaker.Entry.StickBottom


class Oblogout(MenuMaker.Menu):
    name = "Oblogout"

    def __init__(self):
        subs = [
            X('<menu id="client-list-menu"/>'),
            X('<item label="Session"> <action name="Execute"> <execute> sudo oblogout </execute> </action> </item>'),
            X('<item label="Restart"> <action name="Execute"> <prompt> Are you sure you want to restart? </prompt> <execute> sudo shutdown -r now </execute> </action> </item>'),
            X('<item label="Shut down"> <action name="Execute"> <prompt> Are you sure you want to shut down? </prompt> <execute> sudo shutdown -h now </execute> </action> </item>'),
        ]
        super(Oblogout, self).__init__(subs)
        self.align = MenuMaker.Entry.StickBottom


class X(MenuMaker.Entry):
    def __init__(self, x):
        super(X, self).__init__()
        self.align = MenuMaker.Entry.StickBottom
        self.x = x

    def emit(self, level):
        return [indent(level) + self.x]
from logging import getLogger

from aiohttp import ClientSession


class PostmarkClient(object):
    """
    Example:
        curl "https://api.postmarkapp.com/email" \
          -X POST \
          -H "Accept: application/json" \
          -H "Content-Type: application/json" \
          -H "X-Postmark-Server-Token: server token" \
          -d "{From: 'sender@example.com', To: 'receiver@example.com', Subject: 'Postmark test', HtmlBody: '<html><body><strong>Hello</strong> dear Postmark user.</body></html>'}"
    """

    def __init__(self, server_token):
        self.__log = getLogger('aiopm')
        self._server_token = server_token

    async def send(self, From, To, Subject, HtmlBody):
        """
        Send a single email
        @see: https://postmarkapp.com/developer/api/email-api
        """
        url = 'https://api.postmarkapp.com/email'
        payload = {
            'From': From,
            'To': To,
            'Subject': Subject,
            'HtmlBody': HtmlBody,
        }
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'X-Postmark-Server-Token': self._server_token,
        }
        async with ClientSession() as session:
            async with session.post(url, json=payload, headers=headers) as resp:
                self.__log.debug("status = {status!r}".format(status=resp.status))
                if resp.status != 200:
                    content = await resp.text()
                    raise RuntimeError(
                        'Postmark response code {status} and content {content}'
                        .format(status=resp.status, content=content))
                values = await resp.json()
        return values

    async def sendWithTemplate(self, From, To, TemplateAlias, TemplateModel,
                               Tag=None):
        """
        Send email with template
        @see: https://postmarkapp.com/developer/api/templates-api#email-with-template
        """
        url = 'https://api.postmarkapp.com/email/withTemplate'
        payload = {
            'TemplateAlias': TemplateAlias,
            'TemplateModel': TemplateModel,
            'From': From,
            'To': To,
            'Tag': Tag,
        }
        headers = {
            'Accept': 'application/json',
            'Content-Type': 'application/json',
            'X-Postmark-Server-Token': self._server_token,
        }
        async with ClientSession() as session:
            async with session.post(url, json=payload, headers=headers) as resp:
                self.__log.debug("status = {status!r}".format(status=resp.status))
                if resp.status != 200:
                    content = await resp.text()
                    raise RuntimeError(
                        'Postmark response code {status} and content {content}'
                        .format(status=resp.status, content=content))
                values = await resp.json()
        return values
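# --- Usage sketch (not from the original source): a minimal way to drive
# PostmarkClient from asyncio. The token and addresses are placeholders;
# only the send() signature comes from the class above.
import asyncio

async def _demo():
    client = PostmarkClient(server_token='YOUR-SERVER-TOKEN')
    result = await client.send(
        From='sender@example.com',
        To='receiver@example.com',
        Subject='Postmark test',
        HtmlBody='<strong>Hello</strong> dear Postmark user.',
    )
    # Postmark answers with a JSON body; MessageID is one documented field.
    print(result.get('MessageID'))

if __name__ == '__main__':
    asyncio.run(_demo())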
# coding=utf-8
# @File  : Using_API.py
# @Author: PuJi
# @Date  : 2018/4/3 0003
import sys
import json

import requests

from fileHelper.ipfs_module import ulord_transmitter
from fileHelper import util
from dbHelper import dbhelper

# Earlier test values kept for reference:
# metadata = {"license": "ULORD Inc", "description": "",
#             "language": "en", "title": "What is LBRY?",
#             "author": "Samuel Bryan", "nsfw": False, "licenseUrl": "",
#             "preview": "",
#             "thumbnail": "https://s3.amazonaws.com/files.lbry.io/logo.png",
#             "tag": ["action"]}
# source = {
#     "source": "d5169241150022f996fa7cd6a9a1c421937276a3275eb912790bd07ba7aec1fac5fd45431d226b8fb402691e79aeb24b",
#     "contentType": "video/mp4"}
# fee = {"currency": "ULD", "address": "uW9sdd8AtnNAzMdTD6r9bKmvURSzd2rgXU",
#        "amount": 1.2}
# username = 'justin'
# sourcename = 'go-ipfs_v0.4.14_linux-amd64.tar.gz'

source_metadata = {
    'metadata': {
        "license": "LBRY Inc",
        "description": "What is LBRY? An introduction with Alex Tabarrok",
        "language": "en",
        "title": "Test",
        "author": "CAOLINAN",
        "nsfw": False,
        "licenseUrl": "",
        "preview": "",
        "thumbnail": "https://s3.amazonaws.com/files.lbry.io/logo.png",
        "tag": ["action"]
    },
    'source_hash': "d5169241150022f996fa7cd6a9a1c421937276a3275eb912790bd07ba7aec1fac5fd45431d226b8fb402691e79aeb24b",
    'content_type': "video/mp4",
    'currency': "ULD",
    'amount': 1.2,
    'username': 'default_wallet',
    'sourcename': r'E:\ipfs\go-ipfs\ipfs.exe',
    'password': '123'
}


def publish(upload_file):
    url = 'http://192.168.14.67:5000/v1/transactions/publish/'
    source_metadata['sourcename'] = upload_file
    source_metadata['metadata']['author'] = source_metadata.get('username')
    source_metadata['content_type'] = util.getType(source_metadata.get('sourcename'))
    # TODO: change the schema so the upload hash replaces source_hash
    upload_hash = ulord_transmitter.upload(source_metadata.get('sourcename'))
    # Save the file info in the DB
    new_file = dbhelper.File(name=util.getName(source_metadata.get('sourcename')),
                             hash=upload_hash)
    source_metadata['sourcename'] = util.getPureName(source_metadata.get('sourcename'))
    try:
        dbhelper.session.add(new_file)
        dbhelper.session.commit()
        dbhelper.session.close()
    except Exception:
        print("Error saving data in DB. Rolling back.")
        dbhelper.session.rollback()
    if source_metadata['source_hash']:
        import pprint
        pprint.pprint(source_metadata)
        print("hash:{}".format(upload_hash))
        res = requests.post(url, json=source_metadata,
                            headers={'appkey': "03e410a136ec11e8adaff48e3889c8ab"})
        print("res:{}".format(res))
        print(res.json().get('reason'))


# user register
def register():
    url = 'http://192.168.14.67:5000/v1/users/reg/'
    meta_data = {
        'username': 'username',
        'password': 'password',
        'email': 'email',
        'appname': 'appname',
        'appdes': 'appdes'
    }
    json_string = json.dumps(meta_data)
    print(json_string)
    appkey = requests.post(url, json=json.loads(json_string))
    print(appkey.json())


def login(username, password):
    url = 'http://192.168.14.67:5000/v1/users/login/'
    meta_data = {
        'username': username,
        'password': password
    }
    json_string = json.dumps(meta_data)
    appkey = requests.post(url, json=json.loads(json_string))
    print(appkey.json())


def download(hash):
    ulord_transmitter.downloadhash(hash)
    try:
        print(hash)
        print(type(hash))
        current_file = dbhelper.session.query(dbhelper.File).filter(
            dbhelper.File.hash == hash).one()
        util.changeName(hash, current_file.name)
    except Exception:
        print("Error querying data in DB. Rolling back.")
        dbhelper.session.rollback()


if __name__ == '__main__':
    # login()
    print(sys.argv)
    arg = sys.argv[1]
    if arg == 'register':
        register()
    elif arg == 'login':
        login(sys.argv[2], sys.argv[3])
    elif arg == 'publish':
        if len(sys.argv) == 3:
            publish(sys.argv[2])
        else:
            publish(r'E:\ipfs\go-ipfs\ipfs.exe')
    elif arg == 'download':
        download(sys.argv[2])
from datetime import datetime

from app import db
from app.models import Vote
from app.services.event_service import check_vote_event


def vote_exists(user_id, idea_id):
    return get_vote(user_id, idea_id) is not None


def get_vote(user_id, idea_id):
    return db.session.query(Vote).filter_by(user_id=user_id,
                                            idea_id=idea_id).first()


def get_votes(idea_id):
    return db.session.query(Vote).filter_by(idea_id=idea_id).order_by(
        Vote.modified.desc()).all()


def get_vote_by_id(vote_id):
    return db.session.query(Vote).filter_by(id=vote_id).first()


def get_all_votes():
    return db.session.query(Vote).all()


def edit_vote(vote_id, value):
    db.session.query(Vote).filter_by(id=vote_id).update({
        Vote.value: value,
        Vote.modified: datetime.utcnow()
    })
    db.session.commit()


def save_vote(vote):
    db.session.add(vote)
    db.session.commit()
    check_vote_event(vote)


def delete_vote_by_id(vote_id):
    db.session.query(Vote).filter_by(id=vote_id).delete(
        synchronize_session='fetch')
    db.session.commit()


def delete_votes_for_user(user_id):
    db.session.query(Vote).filter_by(user_id=user_id).delete(
        synchronize_session='fetch')
    db.session.commit()


def delete_votes_for_idea(idea_id):
    db.session.query(Vote).filter_by(idea_id=idea_id).delete(
        synchronize_session='fetch')
    db.session.commit()


def delete_all_votes():
    db.session.query(Vote).delete(synchronize_session='fetch')
    db.session.commit()
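# --- Usage sketch (an assumption, not part of the service module): an upsert
# built from the helpers above. The Vote constructor keywords are inferred
# from the filter_by() calls and may differ from the real model.
def upsert_vote(user_id, idea_id, value):
    existing = get_vote(user_id, idea_id)
    if existing is not None:
        # edit_vote() also refreshes Vote.modified, as defined above.
        edit_vote(existing.id, value)
    else:
        save_vote(Vote(user_id=user_id, idea_id=idea_id, value=value))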
# -*- coding: utf-8 -*-

import logging
import sys

import click
import click_config_file
from coloredlogs import ColoredFormatter

from . import __version__
from .engine import USBQEngine
from .opts import add_options
from .opts import network_options
from .opts import pcap_options
from .opts import standard_plugin_options
from .opts import usb_device_options
from .pm import AVAILABLE_PLUGINS
from .pm import enable_plugins
from .pm import enable_tracing
from .pm import pm

__all__ = []

log = logging.getLogger(__name__)

CMD_NAME = 'usbq'
CONFIG_FILE = CMD_NAME + '.cfg'
FORMAT = '%(levelname)8s [%(name)24s]: %(message)s'
LOG_FIELD_STYLES = {
    'asctime': {'color': 'green'},
    'hostname': {'color': 'magenta'},
    'levelname': {'color': 'green', 'bold': True},
    'name': {'color': 'blue'},
    'programname': {'color': 'cyan'},
}


def _setup_logging(logfile, debug):
    if debug:
        level = logging.DEBUG
    else:
        level = logging.INFO

    # Turn on logging
    root = logging.getLogger()
    root.setLevel(level)

    # shush little ones
    for mod in ['scapy.loading', 'scapy.runtime']:
        logging.getLogger(mod).setLevel(logging.CRITICAL)

    # Colors and formats
    ch = logging.StreamHandler(sys.stdout)
    ch.setLevel(level)
    fh = logging.FileHandler(logfile, 'w')
    fh.setLevel(level)
    formatter = ColoredFormatter(fmt=FORMAT, field_styles=LOG_FIELD_STYLES)
    ch.setFormatter(formatter)
    fh.setFormatter(logging.Formatter(FORMAT))
    root.addHandler(ch)
    root.addHandler(fh)


@click.group(invoke_without_command=True)
@click.option('--debug', is_flag=True, default=False, help='Enable usbq debug logging.')
@click.option(
    '--logfile',
    type=click.Path(writable=True, dir_okay=False),
    default='debug.log',
    help='Logfile for --debug output',
)
@click.option('--trace', is_flag=True, default=False, help='Trace plugins.')
@click.option(
    '--dump', is_flag=True, default=False, help='Dump USBQ packets to console.'
)
@click.option(
    '--disable-plugin', type=str, multiple=True, default=[], help='Disable plugin'
)
@click.option(
    '--enable-plugin', type=str, multiple=True, default=[], help='Enable plugin'
)
@click.pass_context
@click_config_file.configuration_option(cmd_name='usbq', config_file_name='usbq.cfg')
def main(ctx, debug, trace, logfile, **kwargs):
    '''USBQ: Python programming framework for monitoring and modifying USB communications.'''

    ctx.ensure_object(dict)
    ctx.obj['dump'] = ctx.params['dump']
    ctx.obj['enable_plugin'] = ctx.params['enable_plugin']
    ctx.obj['disable_plugin'] = ctx.params['disable_plugin']

    if ctx.invoked_subcommand is None:
        click.echo(f'usbq version {__version__}\n')
        click.echo(ctx.get_help())
        click.echo('\nAvailable plugins:\n')
        for pd in sorted(AVAILABLE_PLUGINS.values(), key=lambda pd: pd.name):
            click.echo(f'- {pd.name}: {pd.desc}')
        click.echo(
            f'\nDefault config file: {click.get_app_dir(CMD_NAME)}/{CONFIG_FILE}'
        )
    else:
        _setup_logging(logfile, debug)
        if trace:
            enable_tracing()

    return 0


#
# Commands
#


@main.command()
@click.pass_context
@add_options(network_options)
@add_options(pcap_options)
@add_options(usb_device_options)
def mitm(ctx, proxy_addr, proxy_port, listen_addr, listen_port, pcap, usb_id):
    'Man-in-the-Middle USB device to host communications.'

    enable_plugins(
        pm,
        standard_plugin_options(
            proxy_addr, proxy_port, listen_addr, listen_port, pcap,
            dump=ctx.obj['dump']
        )
        + [('lookfor', {'usb_id': usb_id})],
        disabled=ctx.obj['disable_plugin'],
        enabled=ctx.obj['enable_plugin'],
    )
    USBQEngine().run()


if __name__ == "__main__":
    sys.exit(main())  # pragma: no cover
# Copyright (c) Microsoft Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import asyncio
from pathlib import Path
from typing import Any, Dict, List, Set, Union

from playwright._impl._api_types import Error
from playwright._impl._connection import ChannelOwner
from playwright._impl._helper import async_readfile


class Selectors:
    def __init__(self, loop: asyncio.AbstractEventLoop, dispatcher_fiber: Any) -> None:
        self._loop = loop
        self._channels: Set[SelectorsOwner] = set()
        self._registrations: List[Dict] = []
        self._dispatcher_fiber = dispatcher_fiber

    async def register(
        self,
        name: str,
        script: str = None,
        path: Union[str, Path] = None,
        contentScript: bool = None,
    ) -> None:
        if not script and not path:
            raise Error("Either source or path should be specified")
        if path:
            script = (await async_readfile(path)).decode()
        params: Dict[str, Any] = dict(name=name, source=script)
        if contentScript:
            params["contentScript"] = True
        for channel in self._channels:
            await channel._channel.send("register", params)
        self._registrations.append(params)

    def _add_channel(self, channel: "SelectorsOwner") -> None:
        self._channels.add(channel)
        for params in self._registrations:
            # This should not fail except for connection closure, but just in case we catch.
            channel._channel.send_no_reply("register", params)

    def _remove_channel(self, channel: "SelectorsOwner") -> None:
        if channel in self._channels:
            self._channels.remove(channel)


class SelectorsOwner(ChannelOwner):
    def __init__(
        self, parent: ChannelOwner, type: str, guid: str, initializer: Dict
    ) -> None:
        super().__init__(parent, type, guid, initializer)
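# --- Usage sketch (hedged): register() above accepts either inline JS source
# or a path to a selector-engine script. How an instance is reached depends on
# the surrounding Playwright session (e.g. playwright.selectors in the async
# API); the JS engine below is illustrative only.
#
# TAG_ENGINE = """{
#     query(root, selector) { return root.querySelector(selector); },
#     queryAll(root, selector) { return Array.from(root.querySelectorAll(selector)); }
# }"""
# await playwright.selectors.register("tag", script=TAG_ENGINE)
# # After registration, selectors such as "tag=button" resolve via the engine.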
import logging

# General Settings
CONTROL_PHONES = ['4912312312312', ]
LOG_DIR = '/var/log/smsgateway/'
LOG_STD_LEVEL = logging.DEBUG
SMS_DIR = '/var/spool/sms/outgoing/'
SYSTEMCTL_PATH = '/bin/systemctl'  # Use 'which systemctl' to find out
REBOOT_PATH = '/sbin/reboot'
# When using a ramdisk you can use the script instead (see README):
# REBOOT_PATH = '/usr/local/bin/pi-reboot'
APT_PATH = '/usr/bin/apt'
SUDO_PATH = '/usr/bin/sudo'

# Encryption
# Generate with python -c 'from cryptography.fernet import Fernet as f; print(f.generate_key())'
KEY = "xyz="

# Matrix
MATRIX_CREDENTIALS = ('@me:matrix.org', 'mysecretpassword')
MATRIX_HS_URL = "https://matrix.org"

# Telegram
TELEGRAM_CLI_PATH = '/home/smsd/tg/bin/telegram-cli'
TELEGRAM_KEY_PATH = '/home/smsd/tg/tg-server.pub'
TG_USER_ID = 164387030

# Signal
SIGNAL_NUMBER = '+4912312312312'
SIGNAL_CLI_PATH = '/usr/local/bin/signal-cli'

# Facebook
FB_COOKIE_PATH = '/home/smsd/facebook-session.txt'
FB_CREDENTIALS = ('me@example.com', 'mysecretpassword')

# Slack
SL_TOKEN = 'xxxx'

# E-Mail
EMAIL_ACCOUNTS = {
    'example_name': {
        'Host': 'imap.example.com',
        'User': 'somebody@example.com',
        'Password': 'mysecretpassword'
    }
}

# WhatsApp
WA_NUMBER = '4912312312312'
WA_USER_ALIASES = {
    '4912312312312': 'Friend 1',
    '4912312312313': 'Friend 2',
}
WA_GROUP_ALIASES = {
    '123123123': 'Group 1',
    '123123123123': 'Group 2',
}

# Command settings
SERVICES = ['dhcpcd', 'sane-forwards', 'source-telegram', 'source-whatsapp',
            'source-signal', 'source-slack', 'source-email', 'source-facebook',
            'smstools', 'wpa_supplicant', 'ModemManager', 'NetworkManager', ]
WIFI_INTERFACES_ALL = ['wlan0', 'wlan1']
NETWORK_INTERFACES_ALL = ['eth0'] + WIFI_INTERFACES_ALL
WIFI_INTERFACES = ['wlan0']
NETWORK_INTERFACES = ['eth0']
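# --- Key-generation sketch (mirrors the comment above; generate_key() is the
# documented cryptography.fernet API; returns bytes, hence the decode):
# from cryptography.fernet import Fernet
# KEY = Fernet.generate_key().decode()  # paste the resulting value above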
# -*- coding: utf-8 -*-
"""
Module for formatting output data in HTML.
"""

from collections import OrderedDict
from textwrap import dedent

from pandas._config import get_option

from pandas.compat import lzip

from pandas.core.dtypes.generic import ABCMultiIndex

from pandas import option_context

from pandas.io.common import _is_url
from pandas.io.formats.format import TableFormatter, get_level_lengths
from pandas.io.formats.printing import pprint_thing


class HTMLFormatter(TableFormatter):
    """
    Internal class for formatting output data in html.

    This class is intended for shared functionality between
    DataFrame.to_html() and DataFrame._repr_html_().
    Any logic in common with other output formatting methods
    should ideally be inherited from classes in format.py
    and this class responsible for only producing html markup.
    """

    indent_delta = 2

    def __init__(self, formatter, classes=None, border=None):
        self.fmt = formatter
        self.classes = classes

        self.frame = self.fmt.frame
        self.columns = self.fmt.tr_frame.columns
        self.elements = []
        self.bold_rows = self.fmt.kwds.get('bold_rows', False)
        self.escape = self.fmt.kwds.get('escape', True)
        self.show_dimensions = self.fmt.show_dimensions
        if border is None:
            border = get_option('display.html.border')
        self.border = border
        self.table_id = self.fmt.table_id
        self.render_links = self.fmt.render_links

    @property
    def show_row_idx_names(self):
        return self.fmt.show_row_idx_names

    @property
    def show_col_idx_names(self):
        return self.fmt.show_col_idx_names

    @property
    def row_levels(self):
        if self.fmt.index:
            # showing (row) index
            return self.frame.index.nlevels
        elif self.show_col_idx_names:
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # If the row index is not displayed a column of
            # blank cells need to be included before the DataFrame values.
            return 1
        # not showing (row) index
        return 0

    def _get_columns_formatted_values(self):
        return self.columns

    @property
    def is_truncated(self):
        return self.fmt.is_truncated

    @property
    def ncols(self):
        return len(self.fmt.tr_frame.columns)

    def write(self, s, indent=0):
        rs = pprint_thing(s)
        self.elements.append(' ' * indent + rs)

    def write_th(self, s, indent=0, tags=None):
        if self.fmt.col_space is not None and self.fmt.col_space > 0:
            tags = (tags or "")
            tags += ('style="min-width: {colspace};"'
                     .format(colspace=self.fmt.col_space))

        return self._write_cell(s, kind='th', indent=indent, tags=tags)

    def write_td(self, s, indent=0, tags=None):
        return self._write_cell(s, kind='td', indent=indent, tags=tags)

    def _write_cell(self, s, kind='td', indent=0, tags=None):
        if tags is not None:
            start_tag = '<{kind} {tags}>'.format(kind=kind, tags=tags)
        else:
            start_tag = '<{kind}>'.format(kind=kind)

        if self.escape:
            # escape & first to prevent double escaping of &
            esc = OrderedDict([('&', r'&amp;'), ('<', r'&lt;'),
                               ('>', r'&gt;')])
        else:
            esc = {}

        rs = pprint_thing(s, escape_chars=esc).strip()

        if self.render_links and _is_url(rs):
            rs_unescaped = pprint_thing(s, escape_chars={}).strip()
            start_tag += '<a href="{url}" target="_blank">'.format(
                url=rs_unescaped)
            end_a = '</a>'
        else:
            end_a = ''

        self.write('{start}{rs}{end_a}</{kind}>'.format(
            start=start_tag, rs=rs, end_a=end_a, kind=kind), indent)

    def write_tr(self, line, indent=0, indent_delta=0, header=False,
                 align=None, tags=None, nindex_levels=0):
        if tags is None:
            tags = {}

        if align is None:
            self.write('<tr>', indent)
        else:
            self.write('<tr style="text-align: {align};">'
                       .format(align=align), indent)
        indent += indent_delta

        for i, s in enumerate(line):
            val_tag = tags.get(i, None)
            if header or (self.bold_rows and i < nindex_levels):
                self.write_th(s, indent, tags=val_tag)
            else:
                self.write_td(s, indent, tags=val_tag)

        indent -= indent_delta
        self.write('</tr>', indent)

    def render(self):
        self._write_table()

        if self.should_show_dimensions:
            by = chr(215)  # ×
            self.write('<p>{rows} rows {by} {cols} columns</p>'
                       .format(rows=len(self.frame),
                               by=by,
                               cols=len(self.frame.columns)))

        return self.elements

    def _write_table(self, indent=0):
        _classes = ['dataframe']  # Default class.
        use_mathjax = get_option("display.html.use_mathjax")
        if not use_mathjax:
            _classes.append('tex2jax_ignore')
        if self.classes is not None:
            if isinstance(self.classes, str):
                self.classes = self.classes.split()
            if not isinstance(self.classes, (list, tuple)):
                raise TypeError('classes must be a string, list, or tuple, '
                                'not {typ}'.format(typ=type(self.classes)))
            _classes.extend(self.classes)

        if self.table_id is None:
            id_section = ""
        else:
            id_section = ' id="{table_id}"'.format(table_id=self.table_id)

        self.write('<table border="{border}" class="{cls}"{id_section}>'
                   .format(border=self.border, cls=' '.join(_classes),
                           id_section=id_section), indent)

        if self.fmt.header or self.show_row_idx_names:
            self._write_header(indent + self.indent_delta)

        self._write_body(indent + self.indent_delta)

        self.write('</table>', indent)

    def _write_col_header(self, indent):
        truncate_h = self.fmt.truncate_h
        if isinstance(self.columns, ABCMultiIndex):
            template = 'colspan="{span:d}" halign="left"'

            if self.fmt.sparsify:
                # GH3547
                sentinel = object()
            else:
                sentinel = False
            levels = self.columns.format(sparsify=sentinel, adjoin=False,
                                         names=False)
            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            for lnum, (records, values) in enumerate(zip(level_lengths,
                                                         levels)):
                if truncate_h:
                    # modify the header lines
                    ins_col = self.fmt.tr_col_num
                    if self.fmt.sparsify:
                        recs_new = {}
                        # Increment tags after ... col.
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            elif tag + span > ins_col:
                                recs_new[tag] = span + 1
                                if lnum == inner_lvl:
                                    values = (values[:ins_col] + ('...',) +
                                              values[ins_col:])
                                else:
                                    # sparse col headers do not receive a ...
                                    values = (values[:ins_col] +
                                              (values[ins_col - 1], ) +
                                              values[ins_col:])
                            else:
                                recs_new[tag] = span
                            # if ins_col lies between tags, all col headers
                            # get ...
                            if tag + span == ins_col:
                                recs_new[ins_col] = 1
                                values = (values[:ins_col] + ('...',) +
                                          values[ins_col:])
                        records = recs_new
                        inner_lvl = len(level_lengths) - 1
                        if lnum == inner_lvl:
                            records[ins_col] = 1
                    else:
                        recs_new = {}
                        for tag, span in list(records.items()):
                            if tag >= ins_col:
                                recs_new[tag + 1] = span
                            else:
                                recs_new[tag] = span
                        recs_new[ins_col] = 1
                        records = recs_new
                        values = (values[:ins_col] + ['...'] +
                                  values[ins_col:])

                # see gh-22579
                # Column Offset Bug with to_html(index=False) with
                # MultiIndex Columns and Index.
                # Initially fill row with blank cells before column names.
                # TODO: Refactor to remove code duplication with code
                # block below for standard columns index.
                row = [''] * (self.row_levels - 1)
                if self.fmt.index or self.show_col_idx_names:
                    # see gh-22747
                    # If to_html(index_names=False) do not show columns
                    # index names.
                    # TODO: Refactor to use _get_column_name_list from
                    # DataFrameFormatter class and create a
                    # _get_formatted_column_labels function for code
                    # parity with DataFrameFormatter class.
                    if self.fmt.show_index_names:
                        name = self.columns.names[lnum]
                        row.append(pprint_thing(name or ''))
                    else:
                        row.append('')

                tags = {}
                j = len(row)
                for i, v in enumerate(values):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        continue
                    j += 1
                    row.append(v)
                self.write_tr(row, indent, self.indent_delta, tags=tags,
                              header=True)
        else:
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Initially fill row with blank cells before column names.
            # TODO: Refactor to remove code duplication with code block
            # above for columns MultiIndex.
            row = [''] * (self.row_levels - 1)
            if self.fmt.index or self.show_col_idx_names:
                # see gh-22747
                # If to_html(index_names=False) do not show columns
                # index names.
                # TODO: Refactor to use _get_column_name_list from
                # DataFrameFormatter class.
                if self.fmt.show_index_names:
                    row.append(self.columns.name or '')
                else:
                    row.append('')
            row.extend(self._get_columns_formatted_values())
            align = self.fmt.justify

            if truncate_h:
                ins_col = self.row_levels + self.fmt.tr_col_num
                row.insert(ins_col, '...')

            self.write_tr(row, indent, self.indent_delta, header=True,
                          align=align)

    def _write_row_header(self, indent):
        truncate_h = self.fmt.truncate_h
        row = ([x if x is not None else '' for x in self.frame.index.names]
               + [''] * (self.ncols + (1 if truncate_h else 0)))
        self.write_tr(row, indent, self.indent_delta, header=True)

    def _write_header(self, indent):
        self.write('<thead>', indent)

        if self.fmt.header:
            self._write_col_header(indent + self.indent_delta)

        if self.show_row_idx_names:
            self._write_row_header(indent + self.indent_delta)

        self.write('</thead>', indent)

    def _get_formatted_values(self):
        with option_context('display.max_colwidth', 999999):
            fmt_values = {i: self.fmt._format_col(i)
                          for i in range(self.ncols)}
        return fmt_values

    def _write_body(self, indent):
        self.write('<tbody>', indent)
        fmt_values = self._get_formatted_values()

        # write values
        if self.fmt.index and isinstance(self.frame.index, ABCMultiIndex):
            self._write_hierarchical_rows(
                fmt_values, indent + self.indent_delta)
        else:
            self._write_regular_rows(
                fmt_values, indent + self.indent_delta)

        self.write('</tbody>', indent)

    def _write_regular_rows(self, fmt_values, indent):
        truncate_h = self.fmt.truncate_h
        truncate_v = self.fmt.truncate_v

        nrows = len(self.fmt.tr_frame)

        if self.fmt.index:
            fmt = self.fmt._get_formatter('__index__')
            if fmt is not None:
                index_values = self.fmt.tr_frame.index.map(fmt)
            else:
                index_values = self.fmt.tr_frame.index.format()

        row = []
        for i in range(nrows):

            if truncate_v and i == (self.fmt.tr_row_num):
                str_sep_row = ['...'] * len(row)
                self.write_tr(str_sep_row, indent, self.indent_delta,
                              tags=None, nindex_levels=self.row_levels)

            row = []
            if self.fmt.index:
                row.append(index_values[i])
            # see gh-22579
            # Column misalignment also occurs for
            # a standard index when the columns index is named.
            # Add blank cell before data cells.
            elif self.show_col_idx_names:
                row.append('')
            row.extend(fmt_values[j][i] for j in range(self.ncols))

            if truncate_h:
                dot_col_ix = self.fmt.tr_col_num + self.row_levels
                row.insert(dot_col_ix, '...')
            self.write_tr(row, indent, self.indent_delta, tags=None,
                          nindex_levels=self.row_levels)

    def _write_hierarchical_rows(self, fmt_values, indent):
        template = 'rowspan="{span}" valign="top"'

        truncate_h = self.fmt.truncate_h
        truncate_v = self.fmt.truncate_v
        frame = self.fmt.tr_frame
        nrows = len(frame)

        idx_values = frame.index.format(sparsify=False, adjoin=False,
                                        names=False)
        idx_values = lzip(*idx_values)

        if self.fmt.sparsify:
            # GH3547
            sentinel = object()
            levels = frame.index.format(sparsify=sentinel, adjoin=False,
                                        names=False)

            level_lengths = get_level_lengths(levels, sentinel)
            inner_lvl = len(level_lengths) - 1
            if truncate_v:
                # Insert ... row and adjust idx_values and
                # level_lengths to take this into account.
                ins_row = self.fmt.tr_row_num
                inserted = False
                for lnum, records in enumerate(level_lengths):
                    rec_new = {}
                    for tag, span in list(records.items()):
                        if tag >= ins_row:
                            rec_new[tag + 1] = span
                        elif tag + span > ins_row:
                            rec_new[tag] = span + 1

                            # GH 14882 - Make sure insertion done once
                            if not inserted:
                                dot_row = list(idx_values[ins_row - 1])
                                dot_row[-1] = '...'
                                idx_values.insert(ins_row, tuple(dot_row))
                                inserted = True
                            else:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = '...'
                                idx_values[ins_row] = tuple(dot_row)
                        else:
                            rec_new[tag] = span
                        # If ins_row lies between tags, all cols idx cols
                        # receive ...
                        if tag + span == ins_row:
                            rec_new[ins_row] = 1
                            if lnum == 0:
                                idx_values.insert(ins_row, tuple(
                                    ['...'] * len(level_lengths)))

                            # GH 14882 - Place ... in correct level
                            elif inserted:
                                dot_row = list(idx_values[ins_row])
                                dot_row[inner_lvl - lnum] = '...'
                                idx_values[ins_row] = tuple(dot_row)

                    level_lengths[lnum] = rec_new

                level_lengths[inner_lvl][ins_row] = 1
                for ix_col in range(len(fmt_values)):
                    fmt_values[ix_col].insert(ins_row, '...')
                nrows += 1

            for i in range(nrows):
                row = []
                tags = {}

                sparse_offset = 0
                j = 0
                for records, v in zip(level_lengths, idx_values[i]):
                    if i in records:
                        if records[i] > 1:
                            tags[j] = template.format(span=records[i])
                    else:
                        sparse_offset += 1
                        continue
                    j += 1
                    row.append(v)

                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if truncate_h:
                    row.insert(self.row_levels - sparse_offset +
                               self.fmt.tr_col_num, '...')
                self.write_tr(row, indent, self.indent_delta, tags=tags,
                              nindex_levels=len(levels) - sparse_offset)
        else:
            row = []
            for i in range(len(frame)):
                if truncate_v and i == (self.fmt.tr_row_num):
                    str_sep_row = ['...'] * len(row)
                    self.write_tr(str_sep_row, indent, self.indent_delta,
                                  tags=None, nindex_levels=self.row_levels)

                idx_values = list(zip(*frame.index.format(
                    sparsify=False, adjoin=False, names=False)))
                row = []
                row.extend(idx_values[i])
                row.extend(fmt_values[j][i] for j in range(self.ncols))
                if truncate_h:
                    row.insert(self.row_levels + self.fmt.tr_col_num, '...')
                self.write_tr(row, indent, self.indent_delta, tags=None,
                              nindex_levels=frame.index.nlevels)


class NotebookFormatter(HTMLFormatter):
    """
    Internal class for formatting output data in html for display in Jupyter
    Notebooks. This class is intended for functionality specific to
    DataFrame._repr_html_() and DataFrame.to_html(notebook=True)
    """

    def _get_formatted_values(self):
        return {i: self.fmt._format_col(i) for i in range(self.ncols)}

    def _get_columns_formatted_values(self):
        return self.columns.format()

    def write_style(self):
        # We use the "scoped" attribute here so that the desired
        # style properties for the data frame are not then applied
        # throughout the entire notebook.
        template_first = """\
            <style scoped>"""
        template_last = """\
            </style>"""
        template_select = """\
                .dataframe %s {
                    %s: %s;
                }"""
        element_props = [('tbody tr th:only-of-type',
                          'vertical-align',
                          'middle'),
                         ('tbody tr th',
                          'vertical-align',
                          'top')]
        if isinstance(self.columns, ABCMultiIndex):
            element_props.append(('thead tr th',
                                  'text-align',
                                  'left'))
            if self.show_row_idx_names:
                element_props.append(('thead tr:last-of-type th',
                                      'text-align',
                                      'right'))
        else:
            element_props.append(('thead th',
                                  'text-align',
                                  'right'))
        template_mid = '\n\n'.join(map(lambda t: template_select % t,
                                       element_props))
        template = dedent('\n'.join((template_first,
                                     template_mid,
                                     template_last)))
        self.write(template)

    def render(self):
        self.write('<div>')
        self.write_style()
        super(NotebookFormatter, self).render()
        self.write('</div>')
        return self.elements
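# --- Usage sketch (not part of pandas itself): the public entry points that
# route through HTMLFormatter. The options shown map onto the constructor
# arguments above (classes, border) and the fmt kwds (escape, bold_rows).
# import pandas as pd
# df = pd.DataFrame({'a & b': [1, 2]})
# html = df.to_html(classes=['my-table'], border=1, escape=True, bold_rows=True)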
import numpy as np
import pandas as pd
import tensorflow as tf
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras import Input
from keras.layers.core import Activation, Dropout, Dense
from keras.layers import Flatten, LSTM, Bidirectional, Concatenate
from keras.layers.embeddings import Embedding
from keras.models import Model
from keras import Sequential
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt

from src.utils import *
from model import (do_padding, process_keyword, get_location_cols, get_extra,
                   location_dummies, preprocess_text, convert_cities,
                   convert_countries)

train = pd.read_csv("data/train.csv")
test = pd.read_csv("data/test.csv")

# Processing extra vars
keyword_bins = pd.read_csv("data/keyword_bins.csv",
                           dtype={"keyword": str, "keyword_bin": str})
locations = pd.read_csv("data/locations.csv")

train_plus = train.merge(keyword_bins, how="left", on="keyword"). \
    merge(locations, how="left", on="location")
train_plus.loc[train_plus["keyword_bin"].isna(), "keyword_bin"] = "missing"
train_plus = convert_cities(train_plus)
train_plus = convert_countries(train_plus)
train_plus = get_extra(train_plus)

dummies = pd.get_dummies(train_plus[["mention", "link", "hashtag",
                                     "city", "country", "keyword_bin"]])
dummy_cols = dummies.columns
comb = pd.concat([train["text"], dummies], axis=1)

# Split val and train
X_train, X_val, y_train, y_val = \
    train_test_split(comb, train["target"], test_size=0.2, random_state=42)

"""
Parameters:
    vocab_size - max number of words taken by the tokenizer
    max_len - maximum length of the sequence
"""
vocab_size = 8000
max_len = 25

"""
Preprocessing Text
"""
# Text
text_train = preprocess_text(X_train["text"])
text_val = preprocess_text(X_val["text"])

tokenizer = Tokenizer(num_words=vocab_size, oov_token="<oov>")
tokenizer.fit_on_texts(text_train)

padded_train, _ = do_padding(text_train, tokenizer, max_len, "post", "post")
padded_val, _ = do_padding(text_val, tokenizer, max_len, "post", "post")

"""
Model:
    - Text model
    - Others model (w outputs from text)
"""
# Text model
dropout_rate = 0.3
text_model = Sequential([
    Embedding(vocab_size, 36, input_length=max_len),
    Bidirectional(LSTM(16, return_sequences=True, dropout=dropout_rate)),
    Bidirectional(LSTM(16, return_sequences=True, dropout=dropout_rate)),
    Bidirectional(LSTM(16, dropout=dropout_rate)),
    Dense(20, activation="relu"),
    Dense(1, activation="sigmoid")
])
text_model.compile(loss='binary_crossentropy', optimizer='adam',
                   metrics=['accuracy'])
text_model.summary()

history = text_model.fit(padded_train, y_train, epochs=2,
                         validation_data=(padded_val, y_val), verbose=True)

text_pred = text_model.predict(padded_val)
accuracy_score(np.where(text_pred > 0.5, 1, 0), y_val)

# Trying CNN
from keras.layers.convolutional import Conv1D
from keras.layers.convolutional import MaxPooling1D

dropout_rate = 0.3
text_model = Sequential([
    Embedding(vocab_size, 36, input_length=max_len),
    Conv1D(32, 3, padding='same', activation='relu'),
    MaxPooling1D(),
    Flatten(),
    Dense(20, activation="relu"),
    Dense(1, activation="sigmoid")
])
text_model.compile(loss='binary_crossentropy', optimizer='adam',
                   metrics=['accuracy'])
text_model.summary()

history = text_model.fit(padded_train, y_train, epochs=10,
                         validation_data=(padded_val, y_val), verbose=True)

# Model other vars
from xgboost import XGBClassifier

add_model = XGBClassifier()
add_model.fit(X_train[dummy_cols], y_train)

other_model = Sequential([
    Dense(64, activation="relu", input_shape=(len(dummy_cols), )),
    Dense(32, activation="relu"),
    Dense(1, activation="sigmoid")
])
other_model.compile(loss='binary_crossentropy', optimizer='adam',
                    metrics=['accuracy'])
other_model.summary()

history = other_model.fit(X_train[dummy_cols], y_train, epochs=20,
                          validation_data=(X_val[dummy_cols], y_val))

other_pred = other_model.predict(X_val[dummy_cols])
accuracy_score(np.where(other_pred > 0.5, 1, 0), y_val)
np.corrcoef(other_pred.reshape(-1), text_pred.reshape(-1))

# Concat Model
dropout_rate = 0.5
input_1 = Input(shape=(max_len,))
input_2 = Input(shape=(len(dummy_cols),))

embedding_layer = Embedding(vocab_size, 36)(input_1)
lstm_1 = Bidirectional(LSTM(16, return_sequences=True,
                            dropout=dropout_rate))(embedding_layer)
lstm_2 = Bidirectional(LSTM(16, return_sequences=True,
                            dropout=dropout_rate))(lstm_1)
lstm_3 = Bidirectional(LSTM(16, dropout=dropout_rate))(lstm_2)
dense_1 = Dense(8, activation="relu")(lstm_3)

dense_2 = Dense(64, activation="relu")(input_2)
dropout_1 = Dropout(dropout_rate)(dense_2)
dense_3 = Dense(32, activation="relu")(dropout_1)
dropout_2 = Dropout(dropout_rate)(dense_3)
dense_4 = Dense(8, activation="relu")(dropout_2)

concat_layer = Concatenate()([dense_1, dense_4])
dropout_3 = Dropout(dropout_rate)(concat_layer)
# dense_4 = Dense(20, activation="relu")(concat_layer)
dense_4 = Dense(20, activation="relu")(dropout_3)
dropout_6 = Dropout(dropout_rate)(dense_4)
output = Dense(1, activation='sigmoid')(dropout_6)
# output = Dense(1, activation='sigmoid')(concat_layer)

model = Model(inputs=[input_1, input_2], outputs=output)
model.compile(loss='binary_crossentropy',
              # optimizer=tf.keras.optimizers.Adam(learning_rate=0.01),
              optimizer="adam",
              metrics=['accuracy'])
model.summary()

history = model.fit(x=[padded_train, X_train[dummy_cols]], y=y_train,
                    epochs=5, verbose=1,
                    validation_data=([padded_val, X_val[dummy_cols]], y_val))

# I'll go with this model. I'm still a bit annoyed that we can't increase
# accuracy by combining features. I suppose that's likely due to overfitting.
# The final_preds.py script fits the model on the full training set.
text_pred_train = text_model.predict(padded_train)
other_pred_train = other_model.predict(X_train[dummy_cols])
train_x = np.concatenate((text_pred_train, other_pred_train), axis=1)

text_pred_val = text_model.predict(padded_val)
other_pred_val = other_model.predict(X_val[dummy_cols])
val_x = np.concatenate((text_pred_val, other_pred_val), axis=1)

from sklearn.linear_model import LogisticRegression

stacked_mod = LogisticRegression()
stacked_mod.fit(train_x, y_train)
comb_preds = stacked_mod.predict(val_x)
accuracy_score(y_val, comb_preds)
from datetime import datetime, date
from typing import List, Optional

from cumulusci.tasks.bulkdata.step import DataOperationType


def adjust_relative_dates(
    mapping,
    context,
    record: List[Optional[str]],
    operation: DataOperationType,
):
    """Convert specified date and time fields (in ISO format) relative
    to the present moment. If some date is 2020-07-30, anchor_date is
    2020-07-23, and today's date is 2020-09-01, that date will become
    2020-09-08 - the same position in the timeline relative to today."""

    date_fields, date_time_fields, today = context

    # Determine the direction in which we are converting.
    # For extracts, we convert the date from today-anchored to
    # mapping.anchor_date-anchored. For loads, we do the reverse.
    if operation is DataOperationType.QUERY:
        current_anchor = today
        target_anchor = mapping.anchor_date
    else:
        current_anchor = mapping.anchor_date
        target_anchor = today

    r = record.copy()

    for index in date_fields:
        if r[index]:
            r[index] = date_to_iso(
                _offset_date(target_anchor, current_anchor, iso_to_date(r[index]))
            )

    for index in date_time_fields:
        if r[index]:
            r[index] = salesforce_from_datetime(
                _offset_datetime(
                    target_anchor, current_anchor, datetime_from_salesforce(r[index])
                )
            )

    return r


# The Salesforce API returns datetimes with millisecond resolution, but
# milliseconds are always zero (that is, .000). Python does parse this with
# strptime. Python renders datetimes into ISO8601 with microsecond resolution
# (.123456), which Salesforce won't accept - we need exactly three digits,
# although they are currently ignored. Python also right-truncates to `.0`,
# which Salesforce won't take. Hence this clumsy workaround.
def datetime_from_salesforce(d):
    """Create a Python datetime from a Salesforce-style ISO8601 string"""
    return datetime.strptime(d, "%Y-%m-%dT%H:%M:%S.%f%z")


def salesforce_from_datetime(d):
    """Create a Salesforce-style ISO8601 string from a Python datetime"""
    return d.strftime("%Y-%m-%dT%H:%M:%S.{}+0000").format(
        str(d.microsecond)[:3].ljust(3, "0")
    )


# Python 3.6 doesn't support the fromisoformat() method.
# These functions are explicit and work on all supported versions.
def date_to_iso(d):
    """Convert date object to ISO8601 string"""
    return d.strftime("%Y-%m-%d")


def iso_to_date(s):
    """Convert ISO8601 string to date object"""
    if isinstance(s, date):
        return s
    return datetime.strptime(s, "%Y-%m-%d").date()


def _offset_date(target_anchor, current_anchor, this_date):
    """Adjust this_date to be relative to target_anchor instead of
    current_anchor"""
    return target_anchor + (this_date - current_anchor)


def _offset_datetime(target_anchor, current_anchor, this_datetime):
    """Adjust this_datetime to be relative to target_anchor instead of
    current_anchor"""
    return datetime.combine(
        _offset_date(target_anchor, current_anchor, this_datetime.date()),
        this_datetime.time(),
    )
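# --- Worked example (illustrative check of the docstring's arithmetic):
# a record date 7 days after the old anchor stays 7 days after the new anchor.
if __name__ == "__main__":
    assert _offset_date(
        date(2020, 9, 1),   # target_anchor ("today")
        date(2020, 7, 23),  # current_anchor (mapping.anchor_date)
        date(2020, 7, 30),  # record date, anchor + 7 days
    ) == date(2020, 9, 8)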
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import *  # NOQA
from future import standard_library
standard_library.install_aliases()  # NOQA

import collections
import itertools
from logging import getLogger
import random

import chainer
import chainer.functions as F
import numpy as np

import chainerrl
from chainerrl import agent
from chainerrl.agents.ppo import _compute_explained_variance
from chainerrl.agents.ppo import _make_dataset
from chainerrl.agents.ppo import _make_dataset_recurrent
from chainerrl.agents.ppo import _yield_subset_of_sequences_with_fixed_number_of_items  # NOQA
from chainerrl.misc.batch_states import batch_states


def _get_ordered_params(link):
    """Get a list of parameters sorted by parameter names."""
    name_param_pairs = list(link.namedparams())
    ordered_name_param_pairs = sorted(name_param_pairs, key=lambda x: x[0])
    return [x[1] for x in ordered_name_param_pairs]


def _flatten_and_concat_variables(vs):
    """Flatten and concat variables to make a single flat vector variable."""
    return F.concat([F.flatten(v) for v in vs], axis=0)


def _as_ndarray(x):
    """chainer.Variable or ndarray -> ndarray."""
    if isinstance(x, chainer.Variable):
        return x.array
    else:
        return x


def _flatten_and_concat_ndarrays(vs):
    """Flatten and concat variables to make a single flat vector ndarray."""
    xp = chainer.cuda.get_array_module(vs[0])
    return xp.concatenate([_as_ndarray(v).ravel() for v in vs], axis=0)


def _split_and_reshape_to_ndarrays(flat_v, sizes, shapes):
    """Split and reshape a single flat vector to make a list of ndarrays."""
    xp = chainer.cuda.get_array_module(flat_v)
    sections = np.cumsum(sizes)
    vs = xp.split(flat_v, sections)
    return [v.reshape(shape) for v, shape in zip(vs, shapes)]


def _replace_params_data(params, new_params_data):
    """Replace data of params with new data."""
    for param, new_param_data in zip(params, new_params_data):
        assert param.shape == new_param_data.shape
        param.array[:] = new_param_data


def _hessian_vector_product(flat_grads, params, vec):
    """Compute hessian vector product efficiently by backprop."""
    grads = chainer.grad([F.sum(flat_grads * vec)], params)
    assert all(grad is not None for grad in grads),\
        "The Hessian-vector product contains None."
    grads_data = [grad.array for grad in grads]
    return _flatten_and_concat_ndarrays(grads_data)


def _mean_or_nan(xs):
    """Return the mean of a non-empty sequence, numpy.nan for an empty one."""
    return np.mean(xs) if xs else np.nan


def _find_old_style_function(outputs):
    """Find old-style functions in the computational graph."""
    found = []
    for v in outputs:
        assert isinstance(v, (chainer.Variable, chainer.variable.VariableNode))
        if v.creator is None:
            continue
        if isinstance(v.creator, chainer.Function):
            found.append(v.creator)
        else:
            assert isinstance(v.creator, chainer.FunctionNode)
        found.extend(_find_old_style_function(v.creator.inputs))
    return found


class TRPO(agent.AttributeSavingMixin, agent.Agent):
    """Trust Region Policy Optimization.

    A given stochastic policy is optimized by the TRPO algorithm. A given
    value function is also trained to predict by the TD(lambda) algorithm and
    used for Generalized Advantage Estimation (GAE).

    Since the policy is optimized via the conjugate gradient method and line
    search while the value function is optimized via SGD, these two models
    should be separate.

    Since TRPO requires second-order derivatives to compute Hessian-vector
    products, Chainer v3.0.0 or newer is required. In addition, your policy
    must contain only functions that support second-order derivatives.

    See https://arxiv.org/abs/1502.05477 for TRPO.
    See https://arxiv.org/abs/1506.02438 for GAE.

    Args:
        policy (Policy): Stochastic policy. Its forward computation must
            contain only functions that support second-order derivatives.
            Recurrent models are not supported.
        vf (ValueFunction): Value function. Recurrent models are not
            supported.
        vf_optimizer (chainer.Optimizer): Optimizer for the value function.
        obs_normalizer (chainerrl.links.EmpiricalNormalization or None):
            If set to chainerrl.links.EmpiricalNormalization, it is used to
            normalize observations based on the empirical mean and standard
            deviation of observations. These statistics are updated after
            computing advantages and target values and before updating the
            policy and the value function.
        gamma (float): Discount factor [0, 1]
        lambd (float): Lambda-return factor [0, 1]
        phi (callable): Feature extractor function
        entropy_coef (float): Weight coefficient for entropy bonus [0, inf)
        update_interval (int): Interval steps of TRPO iterations. After every
            `update_interval` steps, this agent updates the policy and the
            value function using data from those steps.
        vf_epochs (int): Number of epochs for which the value function is
            trained on each TRPO iteration.
        vf_batch_size (int): Batch size of SGD for the value function.
        standardize_advantages (bool): Use standardized advantages on updates
        line_search_max_backtrack (int): Maximum number of backtracking in
            line search to tune step sizes of policy updates.
        conjugate_gradient_max_iter (int): Maximum number of iterations in
            the conjugate gradient method.
        conjugate_gradient_damping (float): Damping factor used in the
            conjugate gradient method.
        act_deterministically (bool): If set to True, choose most probable
            actions in the act method instead of sampling from distributions.
        value_stats_window (int): Window size used to compute statistics
            of value predictions.
        entropy_stats_window (int): Window size used to compute statistics
            of entropy of action distributions.
        kl_stats_window (int): Window size used to compute statistics of KL
            divergence between old and new policies.
        policy_step_size_stats_window (int): Window size used to compute
            statistics of step sizes of policy updates.

    Statistics:
        average_value: Average of value predictions on non-terminal states.
            It's updated before the value function is updated.
        average_entropy: Average of entropy of action distributions on
            non-terminal states. It's updated on act_and_train.
        average_kl: Average of KL divergence between old and new policies.
            It's updated after the policy is updated.
        average_policy_step_size: Average of step sizes of policy updates.
            It's updated after the policy is updated.
    """

    saved_attributes = ['policy', 'vf', 'vf_optimizer', 'obs_normalizer']

    def __init__(self,
                 policy,
                 vf,
                 vf_optimizer,
                 obs_normalizer=None,
                 gamma=0.99,
                 lambd=0.95,
                 phi=lambda x: x,
                 entropy_coef=0.01,
                 update_interval=2048,
                 max_kl=0.01,
                 vf_epochs=3,
                 vf_batch_size=64,
                 standardize_advantages=True,
                 batch_states=batch_states,
                 recurrent=False,
                 max_recurrent_sequence_len=None,
                 line_search_max_backtrack=10,
                 conjugate_gradient_max_iter=10,
                 conjugate_gradient_damping=1e-2,
                 act_deterministically=False,
                 value_stats_window=1000,
                 entropy_stats_window=1000,
                 kl_stats_window=100,
                 policy_step_size_stats_window=100,
                 logger=getLogger(__name__),
                 ):

        self.policy = policy
        self.vf = vf
        assert policy.xp is vf.xp, 'policy and vf must be on the same device'
        if recurrent:
            self.model = chainerrl.links.StatelessRecurrentBranched(
                policy, vf)
        else:
            self.model = chainerrl.links.Branched(policy, vf)
        if policy.xp is not np:
            if hasattr(policy, 'device'):
                # Link.device is available only from chainer v6
                self.model.to_device(policy.device)
            else:
                self.model.to_gpu(device=policy._device_id)
        self.vf_optimizer = vf_optimizer
        self.obs_normalizer = obs_normalizer
        self.gamma = gamma
        self.lambd = lambd
        self.phi = phi
        self.entropy_coef = entropy_coef
        self.update_interval = update_interval
        self.max_kl = max_kl
        self.vf_epochs = vf_epochs
        self.vf_batch_size = vf_batch_size
        self.standardize_advantages = standardize_advantages
        self.batch_states = batch_states
        self.recurrent = recurrent
        self.max_recurrent_sequence_len = max_recurrent_sequence_len
        self.line_search_max_backtrack = line_search_max_backtrack
        self.conjugate_gradient_max_iter = conjugate_gradient_max_iter
        self.conjugate_gradient_damping = conjugate_gradient_damping
        self.act_deterministically = act_deterministically
        self.logger = logger

        self.value_record = collections.deque(maxlen=value_stats_window)
        self.entropy_record = collections.deque(maxlen=entropy_stats_window)
        self.kl_record = collections.deque(maxlen=kl_stats_window)
        self.policy_step_size_record = collections.deque(
            maxlen=policy_step_size_stats_window)
        self.explained_variance = np.nan

        assert self.policy.xp is self.vf.xp,\
            'policy and vf should be in the same device.'
        if self.obs_normalizer is not None:
            assert self.policy.xp is self.obs_normalizer.xp,\
                'policy and obs_normalizer should be in the same device.'
        self.xp = self.policy.xp
        self.last_state = None
        self.last_action = None

        # Contains episodes used for next update iteration
        self.memory = []
        # Contains transitions of the last episode not moved to self.memory
        # yet
        self.last_episode = []

        # Batch versions of last_episode, last_state, and last_action
        self.batch_last_episode = None
        self.batch_last_state = None
        self.batch_last_action = None

        # Recurrent states of the model
        self.train_recurrent_states = None
        self.train_prev_recurrent_states = None
        self.test_recurrent_states = None

    def _initialize_batch_variables(self, num_envs):
        self.batch_last_episode = [[] for _ in range(num_envs)]
        self.batch_last_state = [None] * num_envs
        self.batch_last_action = [None] * num_envs

    def _update_if_dataset_is_ready(self):
        dataset_size = (
            sum(len(episode) for episode in self.memory)
            + len(self.last_episode)
            + (0 if self.batch_last_episode is None
               else sum(len(episode)
                        for episode in self.batch_last_episode)))
        if dataset_size >= self.update_interval:
            self._flush_last_episode()
            if self.recurrent:
                dataset = _make_dataset_recurrent(
                    episodes=self.memory,
                    model=self.model,
                    phi=self.phi,
                    batch_states=self.batch_states,
                    obs_normalizer=self.obs_normalizer,
                    gamma=self.gamma,
                    lambd=self.lambd,
                    max_recurrent_sequence_len=(
                        self.max_recurrent_sequence_len),
                )
                self._update_recurrent(dataset)
            else:
                dataset = _make_dataset(
                    episodes=self.memory,
                    model=self.model,
                    phi=self.phi,
                    batch_states=self.batch_states,
                    obs_normalizer=self.obs_normalizer,
                    gamma=self.gamma,
                    lambd=self.lambd,
                )
                assert len(dataset) == dataset_size
                self._update(dataset)
            self.explained_variance = _compute_explained_variance(
                list(itertools.chain.from_iterable(self.memory)))
            self.memory = []

    def _flush_last_episode(self):
        if self.last_episode:
            self.memory.append(self.last_episode)
            self.last_episode = []
        if self.batch_last_episode:
            for i, episode in enumerate(self.batch_last_episode):
                if episode:
                    self.memory.append(episode)
                    self.batch_last_episode[i] = []

    def _update(self, dataset):
        """Update both the policy and the value function."""
        if self.obs_normalizer:
            self._update_obs_normalizer(dataset)
        self._update_policy(dataset)
        self._update_vf(dataset)

    def _update_recurrent(self, dataset):
        """Update both the policy and the value function."""
        flat_dataset = list(itertools.chain.from_iterable(dataset))
        if self.obs_normalizer:
            self._update_obs_normalizer(flat_dataset)
        self._update_policy_recurrent(dataset)
        self._update_vf_recurrent(dataset)

    def _update_vf_recurrent(self, dataset):
        for epoch in range(self.vf_epochs):
            random.shuffle(dataset)
            for minibatch in _yield_subset_of_sequences_with_fixed_number_of_items(  # NOQA
                    dataset, self.vf_batch_size):
                self._update_vf_once_recurrent(minibatch)

    def _update_vf_once_recurrent(self, episodes):
        xp = self.model.xp
        flat_transitions = list(itertools.chain.from_iterable(episodes))

        # Prepare data for a recurrent model
        seqs_states = []
        for ep in episodes:
            states = self.batch_states(
                [transition['state'] for transition in ep], xp, self.phi)
            if self.obs_normalizer:
                states = self.obs_normalizer(states, update=False)
            seqs_states.append(states)

        flat_vs_teacher = xp.array(
            [[transition['v_teacher']] for transition in flat_transitions],
            dtype=np.float32)

        with chainer.using_config('train', False),\
                chainer.no_backprop_mode():
            vf_rs = self.vf.concatenate_recurrent_states(
                [ep[0]['recurrent_state'][1] for ep in episodes])

        flat_vs_pred, _ = self.vf.n_step_forward(
            seqs_states, recurrent_state=vf_rs, output_mode='concat')

        vf_loss = F.mean_squared_error(flat_vs_pred, flat_vs_teacher)
        self.vf_optimizer.update(lambda: vf_loss)

    def _update_obs_normalizer(self, dataset):
        assert self.obs_normalizer
        states = batch_states(
            [b['state'] for b in dataset], self.obs_normalizer.xp, self.phi)
        self.obs_normalizer.experience(states)

    def _update_vf(self, dataset):
        """Update the value function using a given dataset.

        The value function is updated via SGD to minimize TD(lambda) errors.
        """
        xp = self.vf.xp

        assert 'state' in dataset[0]
        assert 'v_teacher' in dataset[0]

        dataset_iter = chainer.iterators.SerialIterator(
            dataset, self.vf_batch_size)

        while dataset_iter.epoch < self.vf_epochs:
            batch = dataset_iter.__next__()
            states = batch_states([b['state'] for b in batch], xp, self.phi)
            if self.obs_normalizer:
                states = self.obs_normalizer(states, update=False)
            vs_teacher = xp.array(
                [b['v_teacher'] for b in batch], dtype=xp.float32)
            vs_pred = self.vf(states)
            vf_loss = F.mean_squared_error(vs_pred, vs_teacher[..., None])
            self.vf_optimizer.update(lambda: vf_loss)

    def _compute_gain(self, log_prob, log_prob_old, entropy, advs):
        """Compute a gain to maximize."""
        prob_ratio = F.exp(log_prob - log_prob_old)
        mean_entropy = F.mean(entropy)
        surrogate_gain = F.mean(prob_ratio * advs)
        return surrogate_gain + self.entropy_coef * mean_entropy

    def _update_policy(self, dataset):
        """Update the policy using a given dataset.

        The policy is updated via CG and line search.
        """
        assert 'state' in dataset[0]
        assert 'action' in dataset[0]
        assert 'adv' in dataset[0]

        # Use full-batch
        xp = self.policy.xp
        states = batch_states([b['state'] for b in dataset], xp, self.phi)
        if self.obs_normalizer:
            states = self.obs_normalizer(states, update=False)
        actions = xp.array([b['action'] for b in dataset])
        advs = xp.array([b['adv'] for b in dataset], dtype=np.float32)

        if self.standardize_advantages:
            mean_advs = xp.mean(advs)
            std_advs = xp.std(advs)
            advs = (advs - mean_advs) / (std_advs + 1e-8)

        # Recompute action distributions for batch backprop
        action_distrib = self.policy(states)

        log_prob_old = xp.array(
            [transition['log_prob'] for transition in dataset],
            dtype=np.float32)

        gain = self._compute_gain(
            log_prob=action_distrib.log_prob(actions),
            log_prob_old=log_prob_old,
            entropy=action_distrib.entropy,
            advs=advs)

        # Distribution to compute KL div against
        action_distrib_old = action_distrib.copy()

        full_step = self._compute_kl_constrained_step(
            action_distrib=action_distrib,
            action_distrib_old=action_distrib_old,
            gain=gain)

        self._line_search(
            full_step=full_step,
            dataset=dataset,
            advs=advs,
            action_distrib_old=action_distrib_old,
            gain=gain)

    def _update_policy_recurrent(self, dataset):
        """Update the policy using a given dataset.

        The policy is updated via CG and line search.
        """
        xp = self.model.xp
        flat_transitions = list(itertools.chain.from_iterable(dataset))

        # Prepare data for a recurrent model
        seqs_states = []
        for ep in dataset:
            states = self.batch_states(
                [transition['state'] for transition in ep], xp, self.phi)
            if self.obs_normalizer:
                states = self.obs_normalizer(states, update=False)
            seqs_states.append(states)

        flat_actions = xp.array(
            [transition['action'] for transition in flat_transitions])
        flat_advs = xp.array(
            [transition['adv'] for transition in flat_transitions],
            dtype=np.float32)

        if self.standardize_advantages:
            mean_advs = xp.mean(flat_advs)
            std_advs = xp.std(flat_advs)
            flat_advs = (flat_advs - mean_advs) / (std_advs + 1e-8)

        with chainer.using_config('train', False),\
                chainer.no_backprop_mode():
            policy_rs = self.policy.concatenate_recurrent_states(
                [ep[0]['recurrent_state'][0] for ep in dataset])

        flat_distribs, _ = self.policy.n_step_forward(
            seqs_states, recurrent_state=policy_rs, output_mode='concat')

        log_prob_old = xp.array(
            [transition['log_prob'] for transition in flat_transitions],
            dtype=np.float32)

        gain = self._compute_gain(
            log_prob=flat_distribs.log_prob(flat_actions),
            log_prob_old=log_prob_old,
            entropy=flat_distribs.entropy,
            advs=flat_advs)

        # Distribution to compute KL div against
        action_distrib_old = flat_distribs.copy()

        full_step = self._compute_kl_constrained_step(
            action_distrib=flat_distribs,
            action_distrib_old=action_distrib_old,
            gain=gain)

        self._line_search(
            full_step=full_step,
            dataset=dataset,
            advs=flat_advs,
            action_distrib_old=action_distrib_old,
            gain=gain)

    def _compute_kl_constrained_step(self, action_distrib,
                                     action_distrib_old, gain):
        """Compute a step of policy parameters with a KL constraint."""
        policy_params = _get_ordered_params(self.policy)
        kl = F.mean(action_distrib_old.kl(action_distrib))

        # Check if kl computation fully supports double backprop
        old_style_funcs = _find_old_style_function([kl])
        if old_style_funcs:
            raise RuntimeError("""\
Old-style functions (chainer.Function) are used to compute KL divergence.
Since TRPO requires second-order derivative of KL divergence, its computation
should be done with new-style functions (chainer.FunctionNode) only.

Found old-style functions: {}""".format(old_style_funcs))

        kl_grads = chainer.grad([kl], policy_params,
                                enable_double_backprop=True)
        assert all(g is not None for g in kl_grads), "\
The gradient contains None. The policy may have unused parameters."
        flat_kl_grads = _flatten_and_concat_variables(kl_grads)

        def fisher_vector_product_func(vec):
            fvp = _hessian_vector_product(flat_kl_grads, policy_params, vec)
            return fvp + self.conjugate_gradient_damping * vec

        gain_grads = chainer.grad([gain], policy_params)
        assert all(g is not None for g in gain_grads), "\
The gradient contains None. The policy may have unused parameters."
        flat_gain_grads = _flatten_and_concat_ndarrays(gain_grads)
        step_direction = chainerrl.misc.conjugate_gradient(
            fisher_vector_product_func, flat_gain_grads,
            max_iter=self.conjugate_gradient_max_iter,
        )

        # We want a step size that satisfies KL(old|new) < max_kl.
        # Let d = alpha * step_direction be the actual parameter updates.
        # The second-order approximation of KL divergence is:
        #   KL(old|new) = 1/2 d^T I d + O(||d||^3),
        # where I is a Fisher information matrix.
        # Substitute d = alpha * step_direction and solve KL(old|new) = max_kl
        # for alpha to get the step size that tightly satisfies the
        # constraint.
        dId = float(step_direction.dot(
            fisher_vector_product_func(step_direction)))
        scale = (2.0 * self.max_kl / (dId + 1e-8)) ** 0.5
        return scale * step_direction

    def _line_search(self, full_step, dataset, advs, action_distrib_old,
                     gain):
        """Do line search for a safe step size."""
        xp = self.policy.xp
        policy_params = _get_ordered_params(self.policy)
        policy_params_sizes = [param.size for param in policy_params]
        policy_params_shapes = [param.shape for param in policy_params]
        step_size = 1.0
        flat_params = _flatten_and_concat_ndarrays(policy_params)

        if self.recurrent:
            seqs_states = []
            for ep in dataset:
                states = self.batch_states(
                    [transition['state'] for transition in ep],
                    xp, self.phi)
                if self.obs_normalizer:
                    states = self.obs_normalizer(states, update=False)
                seqs_states.append(states)

            with chainer.using_config('train', False),\
                    chainer.no_backprop_mode():
                policy_rs = self.policy.concatenate_recurrent_states(
                    [ep[0]['recurrent_state'][0] for ep in dataset])

            def evaluate_current_policy():
                distrib, _ = self.policy.n_step_forward(
                    seqs_states, recurrent_state=policy_rs,
                    output_mode='concat')
                return distrib
        else:
            states = self.batch_states(
                [transition['state'] for transition in dataset],
                xp, self.phi)
            if self.obs_normalizer:
                states = self.obs_normalizer(states, update=False)

            def evaluate_current_policy():
                return self.policy(states)

        flat_transitions = (list(itertools.chain.from_iterable(dataset))
                            if self.recurrent else dataset)
        actions = xp.array(
            [transition['action'] for transition in flat_transitions])
        log_prob_old = xp.array(
            [transition['log_prob'] for transition in flat_transitions],
            dtype=np.float32)

        for i in range(self.line_search_max_backtrack + 1):
            self.logger.info(
                'Line search iteration: %s step size: %s', i, step_size)
            new_flat_params = flat_params + step_size * full_step
            new_params = _split_and_reshape_to_ndarrays(
                new_flat_params,
                sizes=policy_params_sizes,
                shapes=policy_params_shapes,
            )
            _replace_params_data(policy_params, new_params)
            with chainer.using_config('train', False),\
                    chainer.no_backprop_mode():
                new_action_distrib = evaluate_current_policy()
                new_gain = self._compute_gain(
                    log_prob=new_action_distrib.log_prob(actions),
                    log_prob_old=log_prob_old,
                    entropy=new_action_distrib.entropy,
                    advs=advs)
                new_kl = F.mean(action_distrib_old.kl(new_action_distrib))

            improve = new_gain.array - gain.array
            self.logger.info(
                'Surrogate objective improve: %s', float(improve))
            self.logger.info('KL divergence: %s', float(new_kl.array))
            if not xp.isfinite(new_gain.array):
                self.logger.info(
                    "Surrogate objective is not finite. Backtracking...")
            elif not xp.isfinite(new_kl.array):
                self.logger.info(
                    "KL divergence is not finite. Backtracking...")
            elif improve < 0:
                self.logger.info(
                    "Surrogate objective didn't improve. Backtracking...")
            elif float(new_kl.array) > self.max_kl:
                self.logger.info(
                    "KL divergence exceeds max_kl. Backtracking...")
            else:
                self.kl_record.append(float(new_kl.array))
                self.policy_step_size_record.append(step_size)
                break
            step_size *= 0.5
        else:
            self.logger.info("\
Line search couldn't find a good step size. The policy was not updated.")
            self.policy_step_size_record.append(0.)
            _replace_params_data(
                policy_params,
                _split_and_reshape_to_ndarrays(
                    flat_params,
                    sizes=policy_params_sizes,
                    shapes=policy_params_shapes),
            )

    def act_and_train(self, obs, reward):

        if self.last_state is not None:
            transition = {
                'state': self.last_state,
                'action': self.last_action,
                'reward': reward,
                'next_state': obs,
                'nonterminal': 1.0,
            }
            if self.recurrent:
                transition['recurrent_state'] =\
                    self.model.get_recurrent_state_at(
                        self.train_prev_recurrent_states, 0,
                        unwrap_variable=True)
                self.train_prev_recurrent_states = None
                transition['next_recurrent_state'] =\
                    self.model.get_recurrent_state_at(
                        self.train_recurrent_states, 0, unwrap_variable=True)
            self.last_episode.append(transition)

        self._update_if_dataset_is_ready()

        xp = self.xp
        b_state = self.batch_states([obs], xp, self.phi)
        if self.obs_normalizer:
            b_state = self.obs_normalizer(b_state, update=False)

        # action_distrib will be recomputed when computing gradients
        with chainer.using_config('train', False),\
                chainer.no_backprop_mode():
            if self.recurrent:
                assert self.train_prev_recurrent_states is None
                self.train_prev_recurrent_states = self.train_recurrent_states
                (action_distrib, value), self.train_recurrent_states =\
                    self.model(b_state, self.train_prev_recurrent_states)
            else:
                action_distrib, value = self.model(b_state)
            action = chainer.cuda.to_cpu(action_distrib.sample().array)[0]
            self.entropy_record.append(float(action_distrib.entropy.array))
            self.value_record.append(float(value.array))

        self.last_state = obs
        self.last_action = action

        return action

    def act(self, obs):
        xp = self.xp
        b_state = self.batch_states([obs], xp, self.phi)
        if self.obs_normalizer:
            b_state = self.obs_normalizer(b_state, update=False)
        with chainer.using_config('train', False),\
                chainer.no_backprop_mode():
            if self.recurrent:
                action_distrib, self.test_recurrent_states =\
                    self.policy(b_state, self.test_recurrent_states)
            else:
                action_distrib = self.policy(b_state)
            if self.act_deterministically:
                action = chainer.cuda.to_cpu(
                    action_distrib.most_probable.array)[0]
            else:
                action = chainer.cuda.to_cpu(
                    action_distrib.sample().array)[0]
        return action

    def stop_episode_and_train(self, state, reward, done=False):
        assert self.last_state is not None
        transition = {
            'state': self.last_state,
            'action': self.last_action,
            'reward': reward,
            'next_state': state,
            'nonterminal': 0.0 if done else 1.0,
        }
        if self.recurrent:
            transition['recurrent_state'] = self.model.get_recurrent_state_at(
                self.train_prev_recurrent_states, 0, unwrap_variable=True)
            self.train_prev_recurrent_states = None
            transition['next_recurrent_state'] =\
                self.model.get_recurrent_state_at(
                    self.train_recurrent_states, 0, unwrap_variable=True)
            self.train_recurrent_states = None
        self.last_episode.append(transition)

        self.last_state = None
        self.last_action = None

        self._flush_last_episode()
        self.stop_episode()
        self._update_if_dataset_is_ready()

    def stop_episode(self):
        self.test_recurrent_states = None

    def batch_act(self, batch_obs):
        xp = self.xp
        b_state = self.batch_states(batch_obs, xp, self.phi)
        if self.obs_normalizer:
            b_state = self.obs_normalizer(b_state, update=False)
        with chainer.using_config('train', False),\
                chainer.no_backprop_mode():
            if self.recurrent:
                (action_distrib, _), self.test_recurrent_states = self.model(
                    b_state, self.test_recurrent_states)
            else:
                action_distrib, _ = self.model(b_state)
            if self.act_deterministically:
                action = chainer.cuda.to_cpu(
                    action_distrib.most_probable.array)
            else:
                action = chainer.cuda.to_cpu(action_distrib.sample().array)
        return action

    def batch_act_and_train(self, batch_obs):
        xp = self.xp
        b_state = self.batch_states(batch_obs, xp, self.phi)
        # (source truncated)
= self.batch_states(batch_obs, xp, self.phi) if self.obs_normalizer: b_state = self.obs_normalizer(b_state, update=False) num_envs = len(batch_obs) if self.batch_last_episode is None: self._initialize_batch_variables(num_envs) assert len(self.batch_last_episode) == num_envs assert len(self.batch_last_state) == num_envs assert len(self.batch_last_action) == num_envs # action_distrib will be recomputed when computing gradients with chainer.using_config('train', False), chainer.no_backprop_mode(): if self.recurrent: assert self.train_prev_recurrent_states is None self.train_prev_recurrent_states = self.train_recurrent_states (action_distrib, batch_value), self.train_recurrent_states =\ self.model(b_state, self.train_prev_recurrent_states) else: action_distrib, batch_value = self.model(b_state) batch_action = chainer.cuda.to_cpu(action_distrib.sample().array) self.entropy_record.extend( chainer.cuda.to_cpu(action_distrib.entropy.array)) self.value_record.extend(chainer.cuda.to_cpu((batch_value.array))) self.batch_last_state = list(batch_obs) self.batch_last_action = list(batch_action) return batch_action def batch_observe(self, batch_obs, batch_reward, batch_done, batch_reset): if self.recurrent: # Reset recurrent states when episodes end indices_that_ended = [ i for i, (done, reset) in enumerate(zip(batch_done, batch_reset)) if done or reset] if indices_that_ended: self.test_recurrent_states =\ self.model.mask_recurrent_state_at( self.test_recurrent_states, indices_that_ended) def batch_observe_and_train(self, batch_obs, batch_reward, batch_done, batch_reset): for i, (state, action, reward, next_state, done, reset) in enumerate(zip( # NOQA self.batch_last_state, self.batch_last_action, batch_reward, batch_obs, batch_done, batch_reset, )): if state is not None: assert action is not None transition = { 'state': state, 'action': action, 'reward': reward, 'next_state': next_state, 'nonterminal': 0.0 if done else 1.0, } if self.recurrent: transition['recurrent_state'] =\ self.model.get_recurrent_state_at( self.train_prev_recurrent_states, i, unwrap_variable=True) transition['next_recurrent_state'] =\ self.model.get_recurrent_state_at( self.train_recurrent_states, i, unwrap_variable=True) self.batch_last_episode[i].append(transition) if done or reset: assert self.batch_last_episode[i] self.memory.append(self.batch_last_episode[i]) self.batch_last_episode[i] = [] self.batch_last_state[i] = None self.batch_last_action[i] = None self.train_prev_recurrent_states = None if self.recurrent: # Reset recurrent states when episodes end indices_that_ended = [ i for i, (done, reset) in enumerate(zip(batch_done, batch_reset)) if done or reset] if indices_that_ended: self.train_recurrent_states =\ self.model.mask_recurrent_state_at( self.train_recurrent_states, indices_that_ended) self._update_if_dataset_is_ready() def get_statistics(self): return [ ('average_value', _mean_or_nan(self.value_record)), ('average_entropy', _mean_or_nan(self.entropy_record)), ('average_kl', _mean_or_nan(self.kl_record)), ('average_policy_step_size', _mean_or_nan(self.policy_step_size_record)), ('explained_variance', self.explained_variance), ]
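

# A minimal NumPy sketch (not part of the agent above) of the step-size
# scaling used in _compute_kl_constrained_step: with d = beta * s, the
# second-order KL approximation 0.5 * d^T I d equals max_kl exactly when
# beta = sqrt(2 * max_kl / (s^T I s)). The names below (I_fisher, s, max_kl)
# are illustrative assumptions, not identifiers from the agent.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
I_fisher = A @ A.T + 4.0 * np.eye(4)  # SPD stand-in for the Fisher matrix
s = rng.standard_normal(4)            # conjugate-gradient step direction
max_kl = 0.01

beta = np.sqrt(2.0 * max_kl / (s @ I_fisher @ s))
d = beta * s
assert np.isclose(0.5 * d @ I_fisher @ d, max_kl)  # constraint holds tightly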
# To use this code, make sure you # # import json # # and then, to convert JSON from a string, do # # result = stars_add_response_from_dict(json.loads(json_string)) from dataclasses import dataclass from typing import Optional, Any, TypeVar, Type, cast T = TypeVar("T") def from_bool(x: Any) -> bool: assert isinstance(x, bool) return x def from_none(x: Any) -> Any: assert x is None return x def from_union(fs, x): for f in fs: try: return f(x) except: pass assert False def from_str(x: Any) -> str: assert isinstance(x, str) return x def to_class(c: Type[T], x: Any) -> dict: assert isinstance(x, c) return cast(Any, x).to_dict() @dataclass class StarsAddResponse: ok: Optional[bool] = None error: Optional[str] = None needed: Optional[str] = None provided: Optional[str] = None @staticmethod def from_dict(obj: Any) -> 'StarsAddResponse': assert isinstance(obj, dict) ok = from_union([from_bool, from_none], obj.get("ok")) error = from_union([from_str, from_none], obj.get("error")) needed = from_union([from_str, from_none], obj.get("needed")) provided = from_union([from_str, from_none], obj.get("provided")) return StarsAddResponse(ok, error, needed, provided) def to_dict(self) -> dict: result: dict = {} result["ok"] = from_union([from_bool, from_none], self.ok) result["error"] = from_union([from_str, from_none], self.error) result["needed"] = from_union([from_str, from_none], self.needed) result["provided"] = from_union([from_str, from_none], self.provided) return result def stars_add_response_from_dict(s: Any) -> StarsAddResponse: return StarsAddResponse.from_dict(s) def stars_add_response_to_dict(x: StarsAddResponse) -> Any: return to_class(StarsAddResponse, x)
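

# A short round-trip sketch for the converters above, following the pattern in
# the header comment; the payload below is illustrative, not a real API reply.
import json

_payload = ('{"ok": false, "error": "not_allowed", '
            '"needed": "stars:write", "provided": "identify"}')
_resp = stars_add_response_from_dict(json.loads(_payload))
assert _resp.ok is False and _resp.error == "not_allowed"
assert stars_add_response_to_dict(_resp) == json.loads(_payload)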
import tensorflow as tf
from functools import partial
import os
from shutil import copyfile
from datetime import datetime
import skimage as sk
import numpy as np
from time import time

from fasterflow import train
from fasterflow import dataset
from fasterflow import utils
from fasterflow import evaluate
import config_pro_gan as config
import network_pro_gan as network

#----------------------------------------------------------------------------
# Training schedule

def training_schedule(cur_img):
    """
    Return the current resolution depending on the current image count.
    """
    stage_length = config.stage_length
    trans_length = config.trans_length
    phase = cur_img // (stage_length+trans_length) + 2
    if phase >= np.log2(config.image_size):
        return np.log2(config.image_size)
    if cur_img % (stage_length+trans_length) < stage_length:
        return phase
    else:
        a = (cur_img % (stage_length+trans_length) - stage_length)/trans_length
        return phase + a

#----------------------------------------------------------------------------
# Train gan

def train_pro_gan():
    """
    Train the generative adversarial network.
    """
    # Graph definition: inputs, model, loss, optimizer, initializer
    print('Graph building...')
    # Inputs
    image_inputs = tf.compat.v1.placeholder(tf.compat.v1.float32, shape=[None]+config.image_shape, name='image_inputs')
    noise_inputs = tf.compat.v1.placeholder(tf.compat.v1.float32, shape=[None,config.latent_size], name='noise_inputs')
    res_training = tf.compat.v1.placeholder(tf.compat.v1.float32, shape=[], name='res_training')
    minibatch_size = tf.compat.v1.placeholder(tf.compat.v1.int64, shape=[], name='minibatch_size')

    # Network
    gen_loss, disc_loss, output_images = getattr(network, config.network)(
        image_inputs,
        noise_inputs,
        latent_size=config.latent_size,
        minibatch_size=minibatch_size,
        res_building=int(np.log2(config.image_size)),
        res_training=res_training)
    var_list = tf.compat.v1.global_variables()  # Store the list of variables

    # Optimizer
    lr = tf.compat.v1.placeholder(tf.compat.v1.float32, name='lr')
    optimizer_disc = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, **config.optimzer_kwargs)
    optimizer_gen = tf.compat.v1.train.AdamOptimizer(learning_rate=lr, **config.optimzer_kwargs)
    disc_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')
    gen_vars = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.TRAINABLE_VARIABLES, scope='generator')
    training_op_disc = optimizer_disc.minimize(disc_loss, var_list=disc_vars, name='training_op_disc')
    training_op_gen = optimizer_gen.minimize(gen_loss, var_list=gen_vars, name='training_op_gen')

    # Initializer
    init = tf.compat.v1.global_variables_initializer()
    print('Done: graph built.')

    # Init dataset
    inputs,_ = getattr(dataset, config.data_initilize)(**config.data_initilize_kwargs)
    # Define the minibatch selector
    select_minibatch = partial(getattr(dataset, config.data_selector), inputs)

    # Saver
    saver = train.Saver(var_list, config.logs_path, {'gen_loss':gen_loss, 'disc_loss':disc_loss})

    # Save minibatch test
    minibatch = select_minibatch(crt_img=0, res=config.image_size, minibatch_size=config.nbof_test_sample)/2 + 0.5
    saver.save_images(minibatch, 'test')

    # Time measurements
    graph_time = time()

    # Training --> use the configuration file
    print('Training...')
    with tf.compat.v1.Session() as sess:
        # Initialize
        init.run()

        # Restore former parameters
        if config.restore:
            print('Restoring weights stored in {}'.format(config.restore))
            saver.restore(sess, config.restore)

        # Training parameters
        noise_test = 2*np.random.random((config.nbof_test_sample, config.latent_size))-1

        # Training ...
        cur_img = config.start_img
        while cur_img < config.end_img:
            # Change input dataset
            cur_res = training_schedule(cur_img)
            cur_minibatch_size = config.minibatch_size[int(np.ceil(cur_res))]
            minibatch = select_minibatch(crt_img=cur_img, res=2**int(np.ceil(cur_res)),
                                         minibatch_size=cur_minibatch_size)
            noises = 2*np.random.random((cur_minibatch_size, config.latent_size))-1

            feed_dict = {}
            feed_dict['image_inputs:0'] = minibatch
            feed_dict['noise_inputs:0'] = noises
            feed_dict['lr:0'] = config.learning_rate
            feed_dict['res_training:0'] = cur_res
            feed_dict['minibatch_size:0'] = cur_minibatch_size

            sess.run([training_op_disc], feed_dict=feed_dict)
            sess.run([training_op_gen], feed_dict=feed_dict)

            # Display time information
            if cur_img % config.img_per_images == 0:
                graph_time = time() - graph_time
                minutes = int(graph_time // 60)
                seconds = graph_time - minutes * 60
                print('{} kimgs: {:4d} minutes {:.2f} seconds, {:.2f} resolution'.format(
                    cur_img//1000, minutes, seconds, cur_res))
                graph_time = time()

            # Save logs
            if cur_img % config.img_per_summary == 0:
                saver.save_summary(sess, feed_dict, cur_img)

            # Save images
            if cur_img % config.img_per_images == 0:
                feed_dict = {noise_inputs:noise_test, res_training:cur_res}
                outputs = output_images.eval(feed_dict=feed_dict)
                outputs = outputs / 2 + 0.5  # Rescale output images
                outputs = np.clip(outputs, 0, 1)
                saver.save_images(outputs, cur_img//1000)

            # Save model
            if cur_img % config.img_per_save == 0:
                saver.save_model(sess, cur_img//1000)

            # Update current image
            cur_img += cur_minibatch_size

        # Final Saving
        saver.save_model(sess, 'final')
        saver.close_summary()
    print('Done: training')

#----------------------------------------------------------------------------
# Test

if __name__=='__main__':
    exec(config.training_function)

#----------------------------------------------------------------------------
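#----------------------------------------------------------------------------
# A standalone sketch of the schedule above (stage_length, trans_length and
# image_size are made-up values, not the ones from config_pro_gan): the result
# is the integer log2 resolution during a stabilization stage and ramps
# linearly toward the next stage during a transition.

def _schedule_sketch(cur_img, stage_length=600_000, trans_length=600_000,
                     image_size=128):
    phase = cur_img // (stage_length + trans_length) + 2
    if phase >= np.log2(image_size):
        return np.log2(image_size)
    offset = cur_img % (stage_length + trans_length)
    if offset < stage_length:
        return phase
    return phase + (offset - stage_length) / trans_length

assert _schedule_sketch(0) == 2            # training starts at 4x4 (2**2)
assert _schedule_sketch(900_000) == 2.5    # halfway through the first fade-in
assert _schedule_sketch(10_000_000) == 7   # capped at log2(128)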
""" A* algorithm Author: Weicent randomly generate obstacles, start and goal point searching path from start and end simultaneously """ import numpy as np import matplotlib.pyplot as plt import math show_animation = True class Node: """node with properties of g, h, coordinate and parent node""" def __init__(self, G=0, H=0, coordinate=None, parent=None): self.G = G self.H = H self.F = G + H self.parent = parent self.coordinate = coordinate def reset_f(self): self.F = self.G + self.H def hcost(node_coordinate, goal): dx = abs(node_coordinate[0] - goal[0]) dy = abs(node_coordinate[1] - goal[1]) hcost = dx + dy return hcost def gcost(fixed_node, update_node_coordinate): dx = abs(fixed_node.coordinate[0] - update_node_coordinate[0]) dy = abs(fixed_node.coordinate[1] - update_node_coordinate[1]) gc = math.hypot(dx, dy) # gc = move from fixed_node to update_node gcost = fixed_node.G + gc # gcost = move from start point to update_node return gcost def boundary_and_obstacles(start, goal, top_vertex, bottom_vertex, obs_number): """ :param start: start coordinate :param goal: goal coordinate :param top_vertex: top right vertex coordinate of boundary :param bottom_vertex: bottom left vertex coordinate of boundary :param obs_number: number of obstacles generated in the map :return: boundary_obstacle array, obstacle list """ # below can be merged into a rectangle boundary ay = list(range(bottom_vertex[1], top_vertex[1])) ax = [bottom_vertex[0]] * len(ay) cy = ay cx = [top_vertex[0]] * len(cy) bx = list(range(bottom_vertex[0] + 1, top_vertex[0])) by = [bottom_vertex[1]] * len(bx) dx = [bottom_vertex[0]] + bx + [top_vertex[0]] dy = [top_vertex[1]] * len(dx) # generate random obstacles ob_x = np.random.randint(bottom_vertex[0] + 1, top_vertex[0], obs_number).tolist() ob_y = np.random.randint(bottom_vertex[1] + 1, top_vertex[1], obs_number).tolist() # x y coordinate in certain order for boundary x = ax + bx + cx + dx y = ay + by + cy + dy obstacle = np.vstack((ob_x, ob_y)).T.tolist() # remove start and goal coordinate in obstacle list obstacle = [coor for coor in obstacle if coor != start and coor != goal] obs_array = np.array(obstacle) bound = np.vstack((x, y)).T bound_obs = np.vstack((bound, obs_array)) return bound_obs, obstacle def find_neighbor(node, ob, closed): # generate neighbors in certain condition ob_list = ob.tolist() neighbor: list = [] for x in range(node.coordinate[0] - 1, node.coordinate[0] + 2): for y in range(node.coordinate[1] - 1, node.coordinate[1] + 2): if [x, y] not in ob_list: # find all possible neighbor nodes neighbor.append([x, y]) # remove node violate the motion rule # 1. remove node.coordinate itself neighbor.remove(node.coordinate) # 2. 
remove neighbor nodes who cross through two diagonal # positioned obstacles since there is no enough space for # robot to go through two diagonal positioned obstacles # top bottom left right neighbors of node top_nei = [node.coordinate[0], node.coordinate[1] + 1] bottom_nei = [node.coordinate[0], node.coordinate[1] - 1] left_nei = [node.coordinate[0] - 1, node.coordinate[1]] right_nei = [node.coordinate[0] + 1, node.coordinate[1]] # neighbors in four vertex lt_nei = [node.coordinate[0] - 1, node.coordinate[1] + 1] rt_nei = [node.coordinate[0] + 1, node.coordinate[1] + 1] lb_nei = [node.coordinate[0] - 1, node.coordinate[1] - 1] rb_nei = [node.coordinate[0] + 1, node.coordinate[1] - 1] # remove the unnecessary neighbors if top_nei and left_nei in ob_list and lt_nei in neighbor: neighbor.remove(lt_nei) if top_nei and right_nei in ob_list and rt_nei in neighbor: neighbor.remove(rt_nei) if bottom_nei and left_nei in ob_list and lb_nei in neighbor: neighbor.remove(lb_nei) if bottom_nei and right_nei in ob_list and rb_nei in neighbor: neighbor.remove(rb_nei) neighbor = [x for x in neighbor if x not in closed] return neighbor def find_node_index(coordinate, node_list): # find node index in the node list via its coordinate ind = 0 for node in node_list: if node.coordinate == coordinate: target_node = node ind = node_list.index(target_node) break return ind def find_path(open_list, closed_list, goal, obstacle): # searching for the path, update open and closed list # obstacle = obstacle and boundary flag = len(open_list) for i in range(flag): node = open_list[0] open_coordinate_list = [node.coordinate for node in open_list] closed_coordinate_list = [node.coordinate for node in closed_list] temp = find_neighbor(node, obstacle, closed_coordinate_list) for element in temp: if element in closed_list: continue elif element in open_coordinate_list: # if node in open list, update g value ind = open_coordinate_list.index(element) new_g = gcost(node, element) if new_g <= open_list[ind].G: open_list[ind].G = new_g open_list[ind].reset_f() open_list[ind].parent = node else: # new coordinate, create corresponding node ele_node = Node(coordinate=element, parent=node, G=gcost(node, element), H=hcost(element, goal)) open_list.append(ele_node) open_list.remove(node) closed_list.append(node) open_list.sort(key=lambda x: x.F) return open_list, closed_list def node_to_coordinate(node_list): # convert node list into coordinate list and array coordinate_list = [node.coordinate for node in node_list] return coordinate_list def check_node_coincide(close_ls1, closed_ls2): """ :param close_ls1: node closed list for searching from start :param closed_ls2: node closed list for searching from end :return: intersect node list for above two """ # check if node in close_ls1 intersect with node in closed_ls2 cl1 = node_to_coordinate(close_ls1) cl2 = node_to_coordinate(closed_ls2) intersect_ls = [node for node in cl1 if node in cl2] return intersect_ls def find_surrounding(coordinate, obstacle): # find obstacles around node, help to draw the borderline boundary: list = [] for x in range(coordinate[0] - 1, coordinate[0] + 2): for y in range(coordinate[1] - 1, coordinate[1] + 2): if [x, y] in obstacle: boundary.append([x, y]) return boundary def get_border_line(node_closed_ls, obstacle): # if no path, find border line which confine goal or robot border: list = [] coordinate_closed_ls = node_to_coordinate(node_closed_ls) for coordinate in coordinate_closed_ls: temp = find_surrounding(coordinate, obstacle) border = border + temp 
border_ary = np.array(border) return border_ary def get_path(org_list, goal_list, coordinate): # get path from start to end path_org: list = [] path_goal: list = [] ind = find_node_index(coordinate, org_list) node = org_list[ind] while node != org_list[0]: path_org.append(node.coordinate) node = node.parent path_org.append(org_list[0].coordinate) ind = find_node_index(coordinate, goal_list) node = goal_list[ind] while node != goal_list[0]: path_goal.append(node.coordinate) node = node.parent path_goal.append(goal_list[0].coordinate) path_org.reverse() path = path_org + path_goal path = np.array(path) return path def random_coordinate(bottom_vertex, top_vertex): # generate random coordinates inside maze coordinate = [np.random.randint(bottom_vertex[0] + 1, top_vertex[0]), np.random.randint(bottom_vertex[1] + 1, top_vertex[1])] return coordinate def draw(close_origin, close_goal, start, end, bound): # plot the map if not close_goal.tolist(): # ensure the close_goal not empty # in case of the obstacle number is really large (>4500), the # origin is very likely blocked at the first search, and then # the program is over and the searching from goal to origin # will not start, which remain the closed_list for goal == [] # in order to plot the map, add the end coordinate to array close_goal = np.array([end]) plt.cla() plt.gcf().set_size_inches(11, 9, forward=True) plt.axis('equal') plt.plot(close_origin[:, 0], close_origin[:, 1], 'oy') plt.plot(close_goal[:, 0], close_goal[:, 1], 'og') plt.plot(bound[:, 0], bound[:, 1], 'sk') plt.plot(end[0], end[1], '*b', label='Goal') plt.plot(start[0], start[1], '^b', label='Origin') plt.legend() plt.pause(0.0001) def draw_control(org_closed, goal_closed, flag, start, end, bound, obstacle): """ control the plot process, evaluate if the searching finished flag == 0 : draw the searching process and plot path flag == 1 or 2 : start or end is blocked, draw the border line """ stop_loop = 0 # stop sign for the searching org_closed_ls = node_to_coordinate(org_closed) org_array = np.array(org_closed_ls) goal_closed_ls = node_to_coordinate(goal_closed) goal_array = np.array(goal_closed_ls) path = None if show_animation: # draw the searching process draw(org_array, goal_array, start, end, bound) if flag == 0: node_intersect = check_node_coincide(org_closed, goal_closed) if node_intersect: # a path is find path = get_path(org_closed, goal_closed, node_intersect[0]) stop_loop = 1 print('Path is find!') if show_animation: # draw the path plt.plot(path[:, 0], path[:, 1], '-r') plt.title('Robot Arrived', size=20, loc='center') plt.pause(0.01) plt.show() elif flag == 1: # start point blocked first stop_loop = 1 print('There is no path to the goal! Start point is blocked!') elif flag == 2: # end point blocked first stop_loop = 1 print('There is no path to the goal! End point is blocked!') if show_animation: # blocked case, draw the border line info = 'There is no path to the goal!' \ ' Robot&Goal are split by border' \ ' shown in red \'x\'!' 
if flag == 1: border = get_border_line(org_closed, obstacle) plt.plot(border[:, 0], border[:, 1], 'xr') plt.title(info, size=14, loc='center') plt.pause(0.01) plt.show() elif flag == 2: border = get_border_line(goal_closed, obstacle) plt.plot(border[:, 0], border[:, 1], 'xr') plt.title(info, size=14, loc='center') plt.pause(0.01) plt.show() return stop_loop, path def searching_control(start, end, bound, obstacle): """manage the searching process, start searching from two side""" # initial origin node and end node origin = Node(coordinate=start, H=hcost(start, end)) goal = Node(coordinate=end, H=hcost(end, start)) # list for searching from origin to goal origin_open: list = [origin] origin_close: list = [] # list for searching from goal to origin goal_open = [goal] goal_close: list = [] # initial target target_goal = end # flag = 0 (not blocked) 1 (start point blocked) 2 (end point blocked) flag = 0 # init flag path = None while True: # searching from start to end origin_open, origin_close = \ find_path(origin_open, origin_close, target_goal, bound) if not origin_open: # no path condition flag = 1 # origin node is blocked draw_control(origin_close, goal_close, flag, start, end, bound, obstacle) break # update target for searching from end to start target_origin = min(origin_open, key=lambda x: x.F).coordinate # searching from end to start goal_open, goal_close = \ find_path(goal_open, goal_close, target_origin, bound) if not goal_open: # no path condition flag = 2 # goal is blocked draw_control(origin_close, goal_close, flag, start, end, bound, obstacle) break # update target for searching from start to end target_goal = min(goal_open, key=lambda x: x.F).coordinate # continue searching, draw the process stop_sign, path = draw_control(origin_close, goal_close, flag, start, end, bound, obstacle) if stop_sign: break return path def main(obstacle_number=1500): print(__file__ + ' start!') top_vertex = [60, 60] # top right vertex of boundary bottom_vertex = [0, 0] # bottom left vertex of boundary # generate start and goal point randomly start = random_coordinate(bottom_vertex, top_vertex) end = random_coordinate(bottom_vertex, top_vertex) # generate boundary and obstacles bound, obstacle = boundary_and_obstacles(start, end, top_vertex, bottom_vertex, obstacle_number) path = searching_control(start, end, bound, obstacle) if not show_animation: print(path) if __name__ == '__main__': main(obstacle_number=1500)
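
# A quick numeric sketch of the two cost functions above (the coordinates are
# made up): hcost is the Manhattan distance to the goal, while gcost adds one
# Euclidean step length to the parent's accumulated G.
_start = Node(coordinate=[0, 0], H=hcost([0, 0], [3, 4]))
assert _start.H == 7                              # |3 - 0| + |4 - 0|
assert gcost(_start, [1, 1]) == math.hypot(1, 1)  # one diagonal step from G=0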
import sys import os from io import StringIO import textwrap from distutils.core import Distribution from distutils.command.build_ext import build_ext from distutils import sysconfig from distutils.tests.support import (TempdirManager, LoggingSilencer, copy_xxmodule_c, fixup_build_ext) from distutils.extension import Extension from distutils.errors import ( CompileError, DistutilsPlatformError, DistutilsSetupError, UnknownFileError) import unittest from test import support from test.support import os_helper from test.support.script_helper import assert_python_ok from test.support import threading_helper # http://bugs.python.org/issue4373 # Don't load the xx module more than once. ALREADY_TESTED = False class BuildExtTestCase(TempdirManager, LoggingSilencer, unittest.TestCase): def setUp(self): # Create a simple test environment super(BuildExtTestCase, self).setUp() self.tmp_dir = self.mkdtemp() import site self.old_user_base = site.USER_BASE site.USER_BASE = self.mkdtemp() from distutils.command import build_ext build_ext.USER_BASE = site.USER_BASE self.old_config_vars = dict(sysconfig._config_vars) # bpo-30132: On Windows, a .pdb file may be created in the current # working directory. Create a temporary working directory to cleanup # everything at the end of the test. change_cwd = os_helper.change_cwd(self.tmp_dir) change_cwd.__enter__() self.addCleanup(change_cwd.__exit__, None, None, None) def tearDown(self): import site site.USER_BASE = self.old_user_base from distutils.command import build_ext build_ext.USER_BASE = self.old_user_base sysconfig._config_vars.clear() sysconfig._config_vars.update(self.old_config_vars) super(BuildExtTestCase, self).tearDown() def build_ext(self, *args, **kwargs): return build_ext(*args, **kwargs) @support.requires_subprocess() def test_build_ext(self): cmd = support.missing_compiler_executable() if cmd is not None: self.skipTest('The %r command is not found' % cmd) global ALREADY_TESTED copy_xxmodule_c(self.tmp_dir) xx_c = os.path.join(self.tmp_dir, 'xxmodule.c') xx_ext = Extension('xx', [xx_c]) dist = Distribution({'name': 'xx', 'ext_modules': [xx_ext]}) dist.package_dir = self.tmp_dir cmd = self.build_ext(dist) fixup_build_ext(cmd) cmd.build_lib = self.tmp_dir cmd.build_temp = self.tmp_dir old_stdout = sys.stdout if not support.verbose: # silence compiler output sys.stdout = StringIO() try: cmd.ensure_finalized() cmd.run() finally: sys.stdout = old_stdout if ALREADY_TESTED: self.skipTest('Already tested in %s' % ALREADY_TESTED) else: ALREADY_TESTED = type(self).__name__ code = textwrap.dedent(f""" tmp_dir = {self.tmp_dir!r} import sys import unittest from test import support sys.path.insert(0, tmp_dir) import xx class Tests(unittest.TestCase): def test_xx(self): for attr in ('error', 'foo', 'new', 'roj'): self.assertTrue(hasattr(xx, attr)) self.assertEqual(xx.foo(2, 5), 7) self.assertEqual(xx.foo(13,15), 28) self.assertEqual(xx.new().demo(), None) if support.HAVE_DOCSTRINGS: doc = 'This is a template module just for instruction.' 
                        self.assertEqual(xx.__doc__, doc)
                    self.assertIsInstance(xx.Null(), xx.Null)
                    self.assertIsInstance(xx.Str(), xx.Str)

            unittest.main()
        """)
        assert_python_ok('-c', code)

    def test_solaris_enable_shared(self):
        dist = Distribution({'name': 'xx'})
        cmd = self.build_ext(dist)
        old = sys.platform

        sys.platform = 'sunos'  # fooling finalize_options
        from distutils.sysconfig import _config_vars
        old_var = _config_vars.get('Py_ENABLE_SHARED')
        _config_vars['Py_ENABLE_SHARED'] = 1
        try:
            cmd.ensure_finalized()
        finally:
            sys.platform = old
            if old_var is None:
                del _config_vars['Py_ENABLE_SHARED']
            else:
                _config_vars['Py_ENABLE_SHARED'] = old_var

        # make sure we get some library dirs under solaris
        self.assertGreater(len(cmd.library_dirs), 0)

    def test_user_site(self):
        import site
        dist = Distribution({'name': 'xx'})
        cmd = self.build_ext(dist)

        # making sure the user option is there
        options = [name for name, short, label in cmd.user_options]
        self.assertIn('user', options)

        # setting a value
        cmd.user = 1

        # setting user based lib and include
        lib = os.path.join(site.USER_BASE, 'lib')
        incl = os.path.join(site.USER_BASE, 'include')
        os.mkdir(lib)
        os.mkdir(incl)

        # let's run finalize
        cmd.ensure_finalized()

        # see if include_dirs and library_dirs
        # were set
        self.assertIn(lib, cmd.library_dirs)
        self.assertIn(lib, cmd.rpath)
        self.assertIn(incl, cmd.include_dirs)

    @threading_helper.requires_working_threading()
    def test_optional_extension(self):
        # this extension will fail, but let's ignore this failure
        # with the optional argument.
        modules = [Extension('foo', ['xxx'], optional=False)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = self.build_ext(dist)
        cmd.ensure_finalized()
        self.assertRaises((UnknownFileError, CompileError),
                          cmd.run)  # should raise an error

        modules = [Extension('foo', ['xxx'], optional=True)]
        dist = Distribution({'name': 'xx', 'ext_modules': modules})
        cmd = self.build_ext(dist)
        cmd.ensure_finalized()
        cmd.run()  # should pass

    def test_finalize_options(self):
        # Make sure Python's include directories (for Python.h, pyconfig.h,
        # etc.) are in the include search path.
modules = [Extension('foo', ['xxx'], optional=False)] dist = Distribution({'name': 'xx', 'ext_modules': modules}) cmd = self.build_ext(dist) cmd.finalize_options() py_include = sysconfig.get_python_inc() for p in py_include.split(os.path.pathsep): self.assertIn(p, cmd.include_dirs) plat_py_include = sysconfig.get_python_inc(plat_specific=1) for p in plat_py_include.split(os.path.pathsep): self.assertIn(p, cmd.include_dirs) # make sure cmd.libraries is turned into a list # if it's a string cmd = self.build_ext(dist) cmd.libraries = 'my_lib, other_lib lastlib' cmd.finalize_options() self.assertEqual(cmd.libraries, ['my_lib', 'other_lib', 'lastlib']) # make sure cmd.library_dirs is turned into a list # if it's a string cmd = self.build_ext(dist) cmd.library_dirs = 'my_lib_dir%sother_lib_dir' % os.pathsep cmd.finalize_options() self.assertIn('my_lib_dir', cmd.library_dirs) self.assertIn('other_lib_dir', cmd.library_dirs) # make sure rpath is turned into a list # if it's a string cmd = self.build_ext(dist) cmd.rpath = 'one%stwo' % os.pathsep cmd.finalize_options() self.assertEqual(cmd.rpath, ['one', 'two']) # make sure cmd.link_objects is turned into a list # if it's a string cmd = build_ext(dist) cmd.link_objects = 'one two,three' cmd.finalize_options() self.assertEqual(cmd.link_objects, ['one', 'two', 'three']) # XXX more tests to perform for win32 # make sure define is turned into 2-tuples # strings if they are ','-separated strings cmd = self.build_ext(dist) cmd.define = 'one,two' cmd.finalize_options() self.assertEqual(cmd.define, [('one', '1'), ('two', '1')]) # make sure undef is turned into a list of # strings if they are ','-separated strings cmd = self.build_ext(dist) cmd.undef = 'one,two' cmd.finalize_options() self.assertEqual(cmd.undef, ['one', 'two']) # make sure swig_opts is turned into a list cmd = self.build_ext(dist) cmd.swig_opts = None cmd.finalize_options() self.assertEqual(cmd.swig_opts, []) cmd = self.build_ext(dist) cmd.swig_opts = '1 2' cmd.finalize_options() self.assertEqual(cmd.swig_opts, ['1', '2']) def test_check_extensions_list(self): dist = Distribution() cmd = self.build_ext(dist) cmd.finalize_options() #'extensions' option must be a list of Extension instances self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, 'foo') # each element of 'ext_modules' option must be an # Extension instance or 2-tuple exts = [('bar', 'foo', 'bar'), 'foo'] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # first element of each tuple in 'ext_modules' # must be the extension name (a string) and match # a python dotted-separated name exts = [('foo-bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # second element of each tuple in 'ext_modules' # must be a dictionary (build info) exts = [('foo.bar', '')] self.assertRaises(DistutilsSetupError, cmd.check_extensions_list, exts) # ok this one should pass exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', 'some': 'bar'})] cmd.check_extensions_list(exts) ext = exts[0] self.assertIsInstance(ext, Extension) # check_extensions_list adds in ext the values passed # when they are in ('include_dirs', 'library_dirs', 'libraries' # 'extra_objects', 'extra_compile_args', 'extra_link_args') self.assertEqual(ext.libraries, 'foo') self.assertFalse(hasattr(ext, 'some')) # 'macros' element of build info dict must be 1- or 2-tuple exts = [('foo.bar', {'sources': [''], 'libraries': 'foo', 'some': 'bar', 'macros': [('1', '2', '3'), 'foo']})] self.assertRaises(DistutilsSetupError, 
cmd.check_extensions_list, exts) exts[0][1]['macros'] = [('1', '2'), ('3',)] cmd.check_extensions_list(exts) self.assertEqual(exts[0].undef_macros, ['3']) self.assertEqual(exts[0].define_macros, [('1', '2')]) def test_get_source_files(self): modules = [Extension('foo', ['xxx'], optional=False)] dist = Distribution({'name': 'xx', 'ext_modules': modules}) cmd = self.build_ext(dist) cmd.ensure_finalized() self.assertEqual(cmd.get_source_files(), ['xxx']) def test_unicode_module_names(self): modules = [ Extension('foo', ['aaa'], optional=False), Extension('föö', ['uuu'], optional=False), ] dist = Distribution({'name': 'xx', 'ext_modules': modules}) cmd = self.build_ext(dist) cmd.ensure_finalized() self.assertRegex(cmd.get_ext_filename(modules[0].name), r'foo(_d)?\..*') self.assertRegex(cmd.get_ext_filename(modules[1].name), r'föö(_d)?\..*') self.assertEqual(cmd.get_export_symbols(modules[0]), ['PyInit_foo']) self.assertEqual(cmd.get_export_symbols(modules[1]), ['PyInitU_f_gkaa']) def test_compiler_option(self): # cmd.compiler is an option and # should not be overridden by a compiler instance # when the command is run dist = Distribution() cmd = self.build_ext(dist) cmd.compiler = 'unix' cmd.ensure_finalized() cmd.run() self.assertEqual(cmd.compiler, 'unix') @support.requires_subprocess() def test_get_outputs(self): cmd = support.missing_compiler_executable() if cmd is not None: self.skipTest('The %r command is not found' % cmd) tmp_dir = self.mkdtemp() c_file = os.path.join(tmp_dir, 'foo.c') self.write_file(c_file, 'void PyInit_foo(void) {}\n') ext = Extension('foo', [c_file], optional=False) dist = Distribution({'name': 'xx', 'ext_modules': [ext]}) cmd = self.build_ext(dist) fixup_build_ext(cmd) cmd.ensure_finalized() self.assertEqual(len(cmd.get_outputs()), 1) cmd.build_lib = os.path.join(self.tmp_dir, 'build') cmd.build_temp = os.path.join(self.tmp_dir, 'tempt') # issue #5977 : distutils build_ext.get_outputs # returns wrong result with --inplace other_tmp_dir = os.path.realpath(self.mkdtemp()) old_wd = os.getcwd() os.chdir(other_tmp_dir) try: cmd.inplace = 1 cmd.run() so_file = cmd.get_outputs()[0] finally: os.chdir(old_wd) self.assertTrue(os.path.exists(so_file)) ext_suffix = sysconfig.get_config_var('EXT_SUFFIX') self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, other_tmp_dir) cmd.inplace = 0 cmd.compiler = None cmd.run() so_file = cmd.get_outputs()[0] self.assertTrue(os.path.exists(so_file)) self.assertTrue(so_file.endswith(ext_suffix)) so_dir = os.path.dirname(so_file) self.assertEqual(so_dir, cmd.build_lib) # inplace = 0, cmd.package = 'bar' build_py = cmd.get_finalized_command('build_py') build_py.package_dir = {'': 'bar'} path = cmd.get_ext_fullpath('foo') # checking that the last directory is the build_dir path = os.path.split(path)[0] self.assertEqual(path, cmd.build_lib) # inplace = 1, cmd.package = 'bar' cmd.inplace = 1 other_tmp_dir = os.path.realpath(self.mkdtemp()) old_wd = os.getcwd() os.chdir(other_tmp_dir) try: path = cmd.get_ext_fullpath('foo') finally: os.chdir(old_wd) # checking that the last directory is bar path = os.path.split(path)[0] lastdir = os.path.split(path)[-1] self.assertEqual(lastdir, 'bar') def test_ext_fullpath(self): ext = sysconfig.get_config_var('EXT_SUFFIX') # building lxml.etree inplace #etree_c = os.path.join(self.tmp_dir, 'lxml.etree.c') #etree_ext = Extension('lxml.etree', [etree_c]) #dist = Distribution({'name': 'lxml', 'ext_modules': [etree_ext]}) dist = Distribution() cmd = 
self.build_ext(dist) cmd.inplace = 1 cmd.distribution.package_dir = {'': 'src'} cmd.distribution.packages = ['lxml', 'lxml.html'] curdir = os.getcwd() wanted = os.path.join(curdir, 'src', 'lxml', 'etree' + ext) path = cmd.get_ext_fullpath('lxml.etree') self.assertEqual(wanted, path) # building lxml.etree not inplace cmd.inplace = 0 cmd.build_lib = os.path.join(curdir, 'tmpdir') wanted = os.path.join(curdir, 'tmpdir', 'lxml', 'etree' + ext) path = cmd.get_ext_fullpath('lxml.etree') self.assertEqual(wanted, path) # building twisted.runner.portmap not inplace build_py = cmd.get_finalized_command('build_py') build_py.package_dir = {} cmd.distribution.packages = ['twisted', 'twisted.runner.portmap'] path = cmd.get_ext_fullpath('twisted.runner.portmap') wanted = os.path.join(curdir, 'tmpdir', 'twisted', 'runner', 'portmap' + ext) self.assertEqual(wanted, path) # building twisted.runner.portmap inplace cmd.inplace = 1 path = cmd.get_ext_fullpath('twisted.runner.portmap') wanted = os.path.join(curdir, 'twisted', 'runner', 'portmap' + ext) self.assertEqual(wanted, path) @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX') def test_deployment_target_default(self): # Issue 9516: Test that, in the absence of the environment variable, # an extension module is compiled with the same deployment target as # the interpreter. self._try_compile_deployment_target('==', None) @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX') def test_deployment_target_too_low(self): # Issue 9516: Test that an extension module is not allowed to be # compiled with a deployment target less than that of the interpreter. self.assertRaises(DistutilsPlatformError, self._try_compile_deployment_target, '>', '10.1') @unittest.skipUnless(sys.platform == 'darwin', 'test only relevant for MacOSX') def test_deployment_target_higher_ok(self): # Issue 9516: Test that an extension module can be compiled with a # deployment target higher than that of the interpreter: the ext # module may depend on some newer OS feature. deptarget = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') if deptarget: # increment the minor version number (i.e. 10.6 -> 10.7) deptarget = [int(x) for x in deptarget.split('.')] deptarget[-1] += 1 deptarget = '.'.join(str(i) for i in deptarget) self._try_compile_deployment_target('<', deptarget) def _try_compile_deployment_target(self, operator, target): orig_environ = os.environ os.environ = orig_environ.copy() self.addCleanup(setattr, os, 'environ', orig_environ) if target is None: if os.environ.get('MACOSX_DEPLOYMENT_TARGET'): del os.environ['MACOSX_DEPLOYMENT_TARGET'] else: os.environ['MACOSX_DEPLOYMENT_TARGET'] = target deptarget_c = os.path.join(self.tmp_dir, 'deptargetmodule.c') with open(deptarget_c, 'w') as fp: fp.write(textwrap.dedent('''\ #include <AvailabilityMacros.h> int dummy; #if TARGET %s MAC_OS_X_VERSION_MIN_REQUIRED #else #error "Unexpected target" #endif ''' % operator)) # get the deployment target that the interpreter was built with target = sysconfig.get_config_var('MACOSX_DEPLOYMENT_TARGET') target = tuple(map(int, target.split('.')[0:2])) # format the target value as defined in the Apple # Availability Macros. We can't use the macro names since # at least one value we test with will not exist yet. 
if target[:2] < (10, 10): # for 10.1 through 10.9.x -> "10n0" target = '%02d%01d0' % target else: # for 10.10 and beyond -> "10nn00" if len(target) >= 2: target = '%02d%02d00' % target else: # 11 and later can have no minor version (11 instead of 11.0) target = '%02d0000' % target deptarget_ext = Extension( 'deptarget', [deptarget_c], extra_compile_args=['-DTARGET=%s'%(target,)], ) dist = Distribution({ 'name': 'deptarget', 'ext_modules': [deptarget_ext] }) dist.package_dir = self.tmp_dir cmd = self.build_ext(dist) cmd.build_lib = self.tmp_dir cmd.build_temp = self.tmp_dir try: old_stdout = sys.stdout if not support.verbose: # silence compiler output sys.stdout = StringIO() try: cmd.ensure_finalized() cmd.run() finally: sys.stdout = old_stdout except CompileError: self.fail("Wrong deployment target during compilation") class ParallelBuildExtTestCase(BuildExtTestCase): def build_ext(self, *args, **kwargs): build_ext = super().build_ext(*args, **kwargs) build_ext.parallel = True return build_ext def test_suite(): suite = unittest.TestSuite() suite.addTest(unittest.TestLoader().loadTestsFromTestCase(BuildExtTestCase)) suite.addTest(unittest.TestLoader().loadTestsFromTestCase(ParallelBuildExtTestCase)) return suite if __name__ == '__main__': support.run_unittest(__name__)
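
# A standalone sketch of the Availability Macros encoding performed inline in
# _try_compile_deployment_target above (the tuples are illustrative):

def _encode_deployment_target(target):
    if target[:2] < (10, 10):
        return '%02d%01d0' % target   # 10.1 .. 10.9  -> '1010' .. '1090'
    if len(target) >= 2:
        return '%02d%02d00' % target  # 10.10 and beyond -> '101000'
    return '%02d0000' % target        # 11 and later may lack a minor version

assert _encode_deployment_target((10, 9)) == '1090'
assert _encode_deployment_target((10, 15)) == '101500'
assert _encode_deployment_target((11,)) == '110000'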
#!/usr/bin/env python3
# Copyright (c) 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""Tests for json_values_converter.py.

It tests json_values_converter.py.
"""

import argparse
import os
import sys


def CompareFiles(file1, file2):
  # Use context managers so both files are closed deterministically.
  with open(file1, 'r') as f1, open(file2, 'r') as f2:
    return f1.read() == f2.read()


def TouchStamp(stamp_path):
  dir_name = os.path.dirname(stamp_path)
  if not os.path.isdir(dir_name):
    os.makedirs(dir_name)

  with open(stamp_path, 'a'):
    os.utime(stamp_path, None)


def main():
  parser = argparse.ArgumentParser()
  parser.add_argument('--stamp', help='Path to touch on success.')
  parser.add_argument('files', nargs='+', help='Files to compare.')

  args = parser.parse_args()

  passed = True
  # Compare the files pairwise: (files[0], files[1]), (files[2], files[3]), ...
  for i, j in zip(args.files[::2], args.files[1::2]):
    passed = passed and CompareFiles(i, j)

  if passed and args.stamp:
    TouchStamp(args.stamp)

  return not passed


if __name__ == '__main__':
  sys.exit(main())
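
# Example invocation (paths are illustrative): files are consumed in
# (expected, actual) pairs, and the stamp file is only touched when every
# pair matches:
#
#   json_values_converter_tests.py --stamp out/converter.stamp \
#       a_expected.json a_actual.json b_expected.json b_actual.json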
""" GARS Field is a toolkit for generating Global Area Reference grids """ from .edgarsgrid import EDGARSGrid # noqa: F401 from .field import GARSField # noqa: F401 from .garsgrid import GARSGrid # noqa: F401 from .gedgarsgrid import GEDGARSGrid # noqa: F401
""" player """ from __future__ import absolute_import, division, print_function, unicode_literals import logging from mcedit2.rendering.chunkmeshes.entity.biped import ModelBiped from mcedit2.rendering.chunkmeshes.entity.modelrenderer import ModelRenderer log = logging.getLogger(__name__) class ModelPlayer(ModelBiped): id = "MCEDIT_Player" textureWidth = 64 textureHeight = 64 modelTexture = None def __init__(self, expandOffset=0.0, headOffset=0.0, smallArms=False): super(ModelPlayer, self).__init__(expandOffset, headOffset) self.smallArms = smallArms self.bipedCape = ModelRenderer(self, 0, 0) # self.bipedCape.setTextureSize(64, 32) self.bipedCape.addBox(-5.0, 0.0, -1.0, 10, 16, 1, expandOffset) if smallArms: self.bipedLeftArm = ModelRenderer(self, 32, 48) self.bipedLeftArm.addBox(-1.0, -2.0, -2.0, 3, 12, 4, expandOffset) self.bipedLeftArm.setCenterPoint(5.0, 2.5, 0.0) self.bipedRightArm = ModelRenderer(self, 40, 16) self.bipedRightArm.addBox(-2.0, -2.0, -2.0, 3, 12, 4, expandOffset) self.bipedRightArm.setCenterPoint(-5.0, 2.5, 0.0) self.bipedLeftArmwear = ModelRenderer(self, 48, 48) self.bipedLeftArmwear.addBox(-1.0, -2.0, -2.0, 3, 12, 4, expandOffset + 0.25) self.bipedLeftArmwear.setCenterPoint(5.0, 2.5, 0.0) self.bipedRightArmwear = ModelRenderer(self, 40, 32) self.bipedRightArmwear.addBox(-2.0, -2.0, -2.0, 3, 12, 4, expandOffset + 0.25) self.bipedRightArmwear.setCenterPoint(-5.0, 2.5, 10.0) else: self.bipedLeftArm = ModelRenderer(self, 32, 48) self.bipedLeftArm.addBox(-1.0, -2.0, -2.0, 4, 12, 4, expandOffset) self.bipedLeftArm.setCenterPoint(5.0, 2.0, 0.0) self.bipedLeftArmwear = ModelRenderer(self, 48, 48) self.bipedLeftArmwear.addBox(-1.0, -2.0, -2.0, 4, 12, 4, expandOffset + 0.25) self.bipedLeftArmwear.setCenterPoint(5.0, 2.0, 0.0) self.bipedRightArmwear = ModelRenderer(self, 40, 32) self.bipedRightArmwear.addBox(-3.0, -2.0, -2.0, 4, 12, 4, expandOffset + 0.25) self.bipedRightArmwear.setCenterPoint(-5.0, 2.0, 10.0) self.bipedLeftLeg = ModelRenderer(self, 16, 48) self.bipedLeftLeg.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset) self.bipedLeftLeg.setCenterPoint(1.9, 12.0, 0.0) self.bipedLeftLegwear = ModelRenderer(self, 0, 48) self.bipedLeftLegwear.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset + 0.25) self.bipedLeftLegwear.setCenterPoint(1.9, 12.0, 0.0) self.bipedRightLegwear = ModelRenderer(self, 0, 32) self.bipedRightLegwear.addBox(-2.0, 0.0, -2.0, 4, 12, 4, expandOffset + 0.25) self.bipedRightLegwear.setCenterPoint(-1.9, 12.0, 0.0) self.bipedBodyWear = ModelRenderer(self, 16, 32) self.bipedBodyWear.addBox(-4.0, 0.0, -2.0, 8, 12, 4, expandOffset + 0.25) self.bipedBodyWear.setCenterPoint(0.0, 0.0, 0.0) @property def parts(self): return [ self.bipedHead, self.bipedHeadwear, self.bipedBody, self.bipedBodyWear, self.bipedRightArm, self.bipedRightArmwear, self.bipedLeftArm, self.bipedLeftArmwear, self.bipedRightLeg, self.bipedRightLegwear, self.bipedLeftLeg, self.bipedLeftLegwear, ]
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for calculating gradients.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import enum from tensorflow.python.util.tf_export import tf_export @tf_export("UnconnectedGradients") class UnconnectedGradients(enum.Enum): """Controls how gradient computation behaves when y does not depend on x. The gradient of y with respect to x can be zero in two different ways: there could be no differentiable path in the graph connecting x to y (and so we can statically prove that the gradient is zero) or it could be that runtime values of tensors in a particular execution lead to a gradient of zero (say, if a relu unit happens to not be activated). To allow you to distinguish between these two cases you can choose what value gets returned for the gradient when there is no path in the graph from x to y: * `NONE`: Indicates that [None] will be returned if there is no path from x to y * `ZERO`: Indicates that a zero tensor will be returned in the shape of x. """ NONE = "none" ZERO = "zero"
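
# A short usage sketch (not part of this module); `tf` here stands for an
# installed TensorFlow, and the tensors below are illustrative:
#
#   x = tf.Variable(1.0)
#   y = tf.constant(2.0)
#   with tf.GradientTape(persistent=True) as tape:
#       z = y * y                      # z has no path to x
#   tape.gradient(z, x)                # -> None under the NONE default
#   tape.gradient(z, x,
#                 unconnected_gradients=tf.UnconnectedGradients.ZERO)
#   # -> a zero tensor shaped like x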
from abc import ABC
import time
import timeit


class Tracker(ABC):
    def __init__(self, tracker_type, timed=False):
        self.type = tracker_type
        self.tracker = self.type()
        self.timed = timed
        if timed:
            self.time = 0.
            self.ptime = 0.
            self.frames = 0
            self.predict_frame = self._timed_predict_frame

    # Hide the timing-only API when timed is set to False. Note that special
    # methods are looked up on the type, so this must be defined on the class;
    # assigning to self.__getattribute__ in __init__ is never consulted.
    def __getattribute__(self, attr):
        if attr in ("_timed_predict_frame", "avg_time_per_frame",
                    "avg_cpu_time_per_frame") \
                and not object.__getattribute__(self, "timed"):
            raise AttributeError(attr)
        return object.__getattribute__(self, attr)

    # invisible if timed is set to False
    def avg_time_per_frame(self):
        return self.time / self.frames

    # used to calculate cpu time (not cycles per se) per frame
    def avg_cpu_time_per_frame(self):
        return self.ptime / self.frames

    # same as _timed_predict_frame without the timing overhead
    def predict_frame(self, frame):
        success, bbox = self.tracker.update(frame)
        return success, bbox

    # invisible if timed is set to False
    def _timed_predict_frame(self, frame):
        t = time.process_time()
        t1 = timeit.default_timer()
        success, bbox = self.tracker.update(frame)
        self.ptime += time.process_time() - t
        self.time += timeit.default_timer() - t1
        self.frames += 1
        return success, bbox

    def clear(self):
        del self.tracker
        self.tracker = self.type()

    def _init_tracker(self, frame, bbox):
        bbox = tuple(map(int, bbox))
        return self.tracker.init(frame, bbox)

    def predict_frames(self, frames, first_frame=None, bbox=None):
        frames = iter(frames)
        if bbox:
            self.clear()
            self._init_tracker(
                first_frame if first_frame is not None else next(frames),
                bbox)
        for frame in frames:
            yield frame, self.predict_frame(frame)
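
# A hedged usage sketch, assuming an OpenCV-style tracker factory such as
# cv2.TrackerKCF_create (the video path and bounding box are illustrative):
#
#   import cv2
#
#   tracker = Tracker(cv2.TrackerKCF_create, timed=True)
#   cap = cv2.VideoCapture("clip.mp4")
#   ok, first = cap.read()
#   frames = []
#   while True:
#       ok, frame = cap.read()
#       if not ok:
#           break
#       frames.append(frame)
#   for frame, (success, bbox) in tracker.predict_frames(
#           frames, first_frame=first, bbox=(10, 20, 50, 80)):
#       if not success:
#           break
#   print(tracker.avg_time_per_frame())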
# -*- coding: utf-8 -*- # Generated by Django 1.9.5 on 2016-04-30 16:57 from __future__ import unicode_literals from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): initial = True dependencies = [ ('main', '0001_initial'), migrations.swappable_dependency(settings.AUTH_USER_MODEL), ] operations = [ migrations.CreateModel( name='Assignment', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_date', models.DateTimeField(auto_now_add=True, null=True)), ('modified_date', models.DateTimeField(auto_now=True, null=True)), ('deleted_date', models.DateTimeField(blank=True, null=True)), ('name', models.CharField(max_length=64)), ('description', models.TextField(blank=True, null=True)), ('assigned_comments', models.ManyToManyField(blank=True, to='main.Comment')), ('assigned_submissions', models.ManyToManyField(blank=True, to='main.Submission')), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='Code', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_date', models.DateTimeField(auto_now_add=True, null=True)), ('modified_date', models.DateTimeField(auto_now=True, null=True)), ('deleted_date', models.DateTimeField(blank=True, null=True)), ('name', models.CharField(max_length=64)), ('description', models.TextField(blank=True, null=True)), ('css_class', models.CharField(blank=True, max_length=64, null=True)), ('key', models.CharField(blank=True, max_length=1, null=True)), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_created_by', to=settings.AUTH_USER_MODEL)), ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_deleted_by', to=settings.AUTH_USER_MODEL)), ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='code_modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='CodeScheme', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_date', models.DateTimeField(auto_now_add=True, null=True)), ('modified_date', models.DateTimeField(auto_now=True, null=True)), ('deleted_date', models.DateTimeField(blank=True, null=True)), ('name', models.CharField(max_length=64)), ('description', models.TextField()), ('mutually_exclusive', models.BooleanField(default=False)), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_created_by', to=settings.AUTH_USER_MODEL)), ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_deleted_by', to=settings.AUTH_USER_MODEL)), ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='codescheme_modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='CommentCodeInstance', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_date', models.DateTimeField(auto_now_add=True, null=True)), ('modified_date', models.DateTimeField(auto_now=True, null=True)), ('deleted_date', models.DateTimeField(blank=True, 
null=True)), ('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')), ('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')), ('comment', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Comment')), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_created_by', to=settings.AUTH_USER_MODEL)), ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)), ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='commentcodeinstance_modified_by', to=settings.AUTH_USER_MODEL)), ], options={ 'abstract': False, }, ), migrations.CreateModel( name='SubmissionCodeInstance', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('created_date', models.DateTimeField(auto_now_add=True, null=True)), ('modified_date', models.DateTimeField(auto_now=True, null=True)), ('deleted_date', models.DateTimeField(blank=True, null=True)), ('assignment', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='coding.Assignment')), ('code', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.Code')), ('created_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_created_by', to=settings.AUTH_USER_MODEL)), ('deleted_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_deleted_by', to=settings.AUTH_USER_MODEL)), ('modified_by', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='submissioncodeinstance_modified_by', to=settings.AUTH_USER_MODEL)), ('submission', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='main.Submission')), ], options={ 'abstract': False, }, ), migrations.AddField( model_name='code', name='scheme', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='coding.CodeScheme'), ), migrations.AddField( model_name='assignment', name='code_schemes', field=models.ManyToManyField(to='coding.CodeScheme'), ), migrations.AddField( model_name='assignment', name='coder', field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='assignment', name='created_by', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_created_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='assignment', name='deleted_by', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_deleted_by', to=settings.AUTH_USER_MODEL), ), migrations.AddField( model_name='assignment', name='modified_by', field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='assignment_modified_by', to=settings.AUTH_USER_MODEL), ), ]
from dicom_parser.utils.sequence_detector.sequences.mr.func.bold import (
    BOLD_RULES,
)
from dicom_parser.utils.sequence_detector.sequences.mr.func.fieldmap import (
    FUNCTIONAL_FIELDMAP_RULES,
)
from dicom_parser.utils.sequence_detector.sequences.mr.func.sbref import (
    FUNCTIONAL_SBREF_RULES,
)

MR_FUNCTIONAL_SEQUENCES = {
    "bold": BOLD_RULES,
    "func_fieldmap": FUNCTIONAL_FIELDMAP_RULES,
    "func_sbref": FUNCTIONAL_SBREF_RULES,
}
#!/usr/bin/env python
import argparse
import sys
import os
from datetime import datetime, timedelta

import pandas as pd
import yfinance as yf
from alpha_vantage.timeseries import TimeSeries
from prompt_toolkit.completion import NestedCompleter

from gamestonk_terminal import config_terminal as cfg
from gamestonk_terminal import feature_flags as gtff
from gamestonk_terminal import thought_of_the_day as thought
from gamestonk_terminal.discovery import disc_controller
from gamestonk_terminal.due_diligence import dd_controller
from gamestonk_terminal.fundamental_analysis import fa_controller
from gamestonk_terminal.helper_funcs import b_is_stock_market_open, get_flair
from gamestonk_terminal.main_helper import (
    clear,
    export,
    load,
    print_help,
    view,
    candle,
    print_goodbye,
)
from gamestonk_terminal.menu import session
from gamestonk_terminal.papermill import papermill_controller as mill
from gamestonk_terminal.behavioural_analysis import ba_controller
from gamestonk_terminal.technical_analysis import ta_controller
from gamestonk_terminal.comparison_analysis import ca_controller
from gamestonk_terminal.exploratory_data_analysis import eda_controller
from gamestonk_terminal.options import op_controller
from gamestonk_terminal.econ import econ_controller
from gamestonk_terminal.residuals_analysis import ra_controller
from gamestonk_terminal.portfolio import port_controller
from gamestonk_terminal.cryptocurrency import crypto_controller
from gamestonk_terminal.screener import screener_controller
from gamestonk_terminal.portfolio_optimization import po_controller
from gamestonk_terminal.forex import fx_controller
from gamestonk_terminal.backtesting import bt_controller
from gamestonk_terminal.resource_collection import rc_controller
from gamestonk_terminal.research import res_controller
from gamestonk_terminal.government import gov_controller

# pylint: disable=too-many-statements,too-many-branches


def main():
    """
    Gamestonk Terminal is an awesome stock market terminal that has been
    developed for fun, while I saw my GME shares tanking. But hey, I like
    the stock.
    """
    # Enable VT100 escape sequences for Windows 10, version 1607
    if sys.platform == "win32":
        os.system("")

    s_ticker = ""
    s_start = "2015-01-01"
    df_stock = pd.DataFrame()
    s_interval = "1440min"

    # Set stock by default to speed up testing
    # s_ticker = "BB"
    # ts = TimeSeries(key=cfg.API_KEY_ALPHAVANTAGE, output_format='pandas')
    # df_stock, d_stock_metadata = ts.get_daily_adjusted(symbol=s_ticker, outputsize='full')
    # df_stock.sort_index(ascending=True, inplace=True)
    # s_start = datetime.strptime("2020-06-04", "%Y-%m-%d")
    # df_stock = df_stock[s_start:]

    # Add list of arguments that the main parser accepts
    menu_parser = argparse.ArgumentParser(add_help=False, prog="gamestonk_terminal")
    choices = [
        "help", "quit", "q", "clear", "load", "candle", "view", "export",
        "disc", "scr", "mill", "ba", "res", "fa", "ta", "bt", "dd", "eda",
        "pred", "ca", "op", "econ", "pa", "crypto", "ra", "po", "fx", "rc",
        "gov",
    ]
    menu_parser.add_argument("opt", choices=choices)
    completer = NestedCompleter.from_nested_dict({c: None for c in choices})

    try:
        if os.name == "nt":
            sys.stdin.reconfigure(encoding="utf-8")
            sys.stdout.reconfigure(encoding="utf-8")
    except Exception as e:
        print(e, "\n")

    # Print first welcome message and help
    print("\nWelcome to Gamestonk Terminal Ape.\n")
    should_print_help = True
    parsed_stdin = False

    if gtff.ENABLE_THOUGHTS_DAY:
        print("-------------------")
        try:
            thought.get_thought_of_the_day()
        except Exception as e:
            print(e)
        print("")

    # Loop forever and ever
    while True:
        main_cmd = False
        if should_print_help:
            print_help(s_ticker, s_start, s_interval, b_is_stock_market_open())
            should_print_help = False

        if gtff.ENABLE_QUICK_EXIT:
            print("Quick exit enabled")
            break

        # Get input command from stdin or user
        if not parsed_stdin and len(sys.argv) > 1:
            as_input = " ".join(sys.argv[1:])
            parsed_stdin = True
            print(f"{get_flair()}> {as_input}")
        elif session and gtff.USE_PROMPT_TOOLKIT:
            as_input = session.prompt(f"{get_flair()}> ", completer=completer)
        else:
            as_input = input(f"{get_flair()}> ")

        # Is command empty
        if not as_input:
            print("")
            continue

        # Parse main command of the list of possible commands
        try:
            (ns_known_args, l_args) = menu_parser.parse_known_args(as_input.split())
        except SystemExit:
            print("The command selected doesn't exist\n")
            continue

        b_quit = False
        if ns_known_args.opt == "help":
            should_print_help = True
        elif (ns_known_args.opt == "quit") or (ns_known_args.opt == "q"):
            break
        elif ns_known_args.opt == "clear":
            s_ticker, s_start, s_interval, df_stock = clear(
                l_args, s_ticker, s_start, s_interval, df_stock
            )
            main_cmd = True
        elif ns_known_args.opt == "load":
            s_ticker, s_start, s_interval, df_stock = load(
                l_args, s_ticker, s_start, s_interval, df_stock
            )
            main_cmd = True
        elif ns_known_args.opt == "candle":
            if s_ticker:
                candle(
                    s_ticker,
                    (datetime.now() - timedelta(days=180)).strftime("%Y-%m-%d"),
                )
            else:
                print(
                    "No ticker selected. Use 'load ticker' to load the ticker you want to look at.",
                    "\n",
                )
            main_cmd = True
        elif ns_known_args.opt == "view":
            if s_ticker:
                view(l_args, s_ticker, s_start, s_interval, df_stock)
            else:
                print(
                    "No ticker selected. Use 'load ticker' to load the ticker you want to look at.",
                    "\n",
                )
            main_cmd = True
        elif ns_known_args.opt == "export":
            export(l_args, df_stock)
            main_cmd = True
        elif ns_known_args.opt == "disc":
            b_quit = disc_controller.menu()
        elif ns_known_args.opt == "mill":
            b_quit = mill.papermill_menu()
        elif ns_known_args.opt == "ba":
            b_quit = ba_controller.menu(
                s_ticker.split(".")[0] if "." in s_ticker else s_ticker, s_start
            )
        elif ns_known_args.opt == "res":
            b_quit = res_controller.menu(
                s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                s_start,
                s_interval,
            )
        elif ns_known_args.opt == "ca":
            b_quit = ca_controller.menu(df_stock, s_ticker, s_start, s_interval)
        elif ns_known_args.opt == "fa":
            b_quit = fa_controller.menu(
                s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                s_start,
                s_interval,
            )
        elif ns_known_args.opt == "fx":
            b_quit = fx_controller.menu()
        elif ns_known_args.opt == "ta":
            b_quit = ta_controller.menu(
                df_stock,
                s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                s_start,
                s_interval,
            )
        elif ns_known_args.opt == "dd":
            b_quit = dd_controller.menu(
                df_stock,
                s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                s_start,
                s_interval,
            )
        elif ns_known_args.opt == "eda":
            if s_interval == "1440min":
                b_quit = eda_controller.menu(
                    df_stock,
                    s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                    s_start,
                    s_interval,
                )
            else:
                df_stock = yf.download(s_ticker, start=s_start, progress=False)
                df_stock = df_stock.rename(
                    columns={
                        "Open": "1. open",
                        "High": "2. high",
                        "Low": "3. low",
                        "Close": "4. close",
                        "Adj Close": "5. adjusted close",
                        "Volume": "6. volume",
                    }
                )
                df_stock.index.name = "date"
                s_interval = "1440min"
                b_quit = eda_controller.menu(
                    df_stock,
                    s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                    s_start,
                    s_interval,
                )
        elif ns_known_args.opt == "op":
            b_quit = op_controller.menu(
                s_ticker, df_stock["5. adjusted close"].values[-1]
            )
        elif ns_known_args.opt == "econ":
            b_quit = econ_controller.menu()
        elif ns_known_args.opt == "pa":
            b_quit = port_controller.menu()
        elif ns_known_args.opt == "crypto":
            b_quit = crypto_controller.menu()
        elif ns_known_args.opt == "po":
            b_quit = po_controller.menu([s_ticker])
        elif ns_known_args.opt == "pred":
            if not gtff.ENABLE_PREDICT:
                print("Predict is not enabled in feature_flags.py")
                print("Prediction menu is disabled")
                print("")
                continue

            try:
                # pylint: disable=import-outside-toplevel
                from gamestonk_terminal.prediction_techniques import pred_controller
            except ModuleNotFoundError as e:
                print("One of the optional packages seems to be missing")
                print("Optional packages need to be installed")
                print(e)
                print("")
                continue
            except Exception as e:
                print(e)
                print("")
                continue

            if s_interval == "1440min":
                b_quit = pred_controller.menu(
                    df_stock,
                    s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                    s_start,
                    s_interval,
                )
            # If stock data is intraday, we need to get data again as prediction
            # techniques work on daily adjusted data. By default we load data
            # from Alpha Vantage because the historical data loaded gives a
            # larger dataset than the one provided by quandl
            else:
                try:
                    ts = TimeSeries(
                        key=cfg.API_KEY_ALPHAVANTAGE, output_format="pandas"
                    )
                    # pylint: disable=unbalanced-tuple-unpacking
                    df_stock_pred, _ = ts.get_daily_adjusted(
                        symbol=s_ticker, outputsize="full"
                    )
                    # pylint: disable=no-member
                    df_stock_pred = df_stock_pred.sort_index(ascending=True)
                    df_stock_pred = df_stock_pred[s_start:]
                    b_quit = pred_controller.menu(
                        df_stock_pred,
                        s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                        s_start,
                        interval="1440min",
                    )
                except Exception as e:
                    print(e)
                    print("Either the ticker or the API_KEY is invalid. Try again!")
                    return
        elif ns_known_args.opt == "ra":
            b_quit = ra_controller.menu(
                df_stock,
                s_ticker.split(".")[0] if "." in s_ticker else s_ticker,
                s_start,
                s_interval,
            )
        elif ns_known_args.opt == "scr":
            b_quit = screener_controller.menu()
        elif ns_known_args.opt == "bt":
            b_quit = bt_controller.menu(
                s_ticker.split(".")[0] if "." in s_ticker else s_ticker, s_start
            )
        elif ns_known_args.opt == "rc":
            b_quit = rc_controller.menu()
        elif ns_known_args.opt == "gov":
            b_quit = gov_controller.menu(s_ticker)
        else:
            print("Shouldn't see this command!")
            continue

        if b_quit:
            break

        if not main_cmd:
            should_print_help = True

    print_goodbye()


if __name__ == "__main__":
    main()
import ujson
import sys
import argparse
import re
import spacy

spacy_nlp = spacy.load('en_core_web_sm')

# extra split characters, referred from allenai docqa
extra_split_chars = ("-", "£", "€", "¥", "¢", "₹", "\u2212", "\u2014", "\u2013",
                     "/", "~", "(", ")", "+", "^", "=", "\[", "\]", "'", '"', "'",
                     "\ud01C", "\u2019", "\u201D", "\u2018", "\u00B0")
extra_split_tokens = ("``",
                      # underscores w/o a preceding or following underscore,
                      # so __wow___ -> ___ wow ___
                      "(?<=[^_])_(?=[^_])",
                      "''",
                      "[" + "".join(extra_split_chars) + "]")
extra_split_chars_re = re.compile("(" + "|".join(extra_split_tokens) + ")")


def extra_split(tokens):
    return [x for t in tokens for x in extra_split_chars_re.split(t) if x != ""]


def rephrase_quote(tokens):
    return [t.replace("''", '"').replace("``", '"') for t in tokens]


# split token along with pos tag (basically duplicate)
def extra_split_with_pos(tokens, pos):
    rs_tokens = []
    rs_pos = []
    for t, p in zip(tokens, pos):
        split = [x for x in extra_split_chars_re.split(t) if x != ""]
        rs_tokens.extend(split)
        rs_pos.extend([p] * len(split))
    assert(len(rs_tokens) == len(rs_pos))
    return rs_tokens, rs_pos


# tokenizer that
#   splits into sentences (optional)
#   tokenizes
def tokenize_spacy(text, split_sent, tag_type):
    tokenized = spacy_nlp(text)
    if split_sent:
        tokenized_sents = []
        pos_sents = []
        for sent in tokenized.sents:
            tokenized_sents.append([tok.text for tok in sent if not tok.is_space])
            if tag_type == 'universal':
                pos_sents.append([tok.pos_ for tok in sent if not tok.is_space])
            elif tag_type == 'ptb':
                pos_sents.append([tok.tag_ for tok in sent if not tok.is_space])
            else:
                assert(False)
        return tokenized_sents, pos_sents

    toks = [tok.text for tok in tokenized if not tok.is_space]
    pos = None
    if tag_type == 'universal':
        pos = [tok.pos_ for tok in tokenized if not tok.is_space]
    elif tag_type == 'ptb':
        pos = [tok.tag_ for tok in tokenized if not tok.is_space]
    else:
        assert(False)
    return toks, pos


def get_gold(answer_spans):
    cnt = {}
    for span in answer_spans:
        if span in cnt:
            cnt[span] = cnt[span] + 1
        else:
            cnt[span] = 1
    sorted_keys = sorted(cnt.items(), key=lambda x: x[1], reverse=True)
    maj_span = sorted_keys[0][0]
    return (maj_span, answer_spans.index(maj_span))


def write_to(ls, out_file):
    print('writing to {0}'.format(out_file))
    with open(out_file, 'w+') as f:
        for l in ls:
            f.write((l + '\n'))


def remap_char_idx(context, context_toks):
    context_tok_seq = ' '.join(context_toks)
    m = [-1 for _ in range(len(context))]
    i = 0
    j = 0
    while (i < len(context) and j < len(context_tok_seq)):
        # skip white spaces
        while context[i].strip() == '':
            i += 1
        while context_tok_seq[j].strip() == '':
            j += 1

        if context[i] == context_tok_seq[j]:
            m[i] = j
            i += 1
            j += 1
        elif context[i] == "'" and context[i+1] == "'" and context_tok_seq[j] == '"':
            m[i] = j
            i += 2
            j += 1
        #elif context[i] == '"' and context_tok_seq[j] == '\'':
        #    m[i] = j
        #    i += 1
        #    if context_tok_seq[j+1] == '\'':
        #        j += 2
        else:
            print(context.encode('utf8'))
            print(context_tok_seq.encode('utf8'))
            print(context[:i+1].encode('utf8'))
            print(context_tok_seq[:j+1].encode('utf8'))
            assert(False)
    return m


def remap_token_span(m, context, context_toks):
    tok_str = ' '.join(context_toks)
    remap = [-1 for _ in range(len(tok_str))]
    assert(len(m) == len(context))
    for i in range(len(m)):
        if m[i] != -1:
            remap[m[i]] = i
    assert(remap[0] != -1 and remap[-1] != -1)

    token_spans = []
    in_span = True
    start = 0
    for i in range(len(remap)):
        if (remap[i] == -1 and in_span):
            token_spans.append((remap[start], remap[i-1]))
            in_span = False
        elif remap[i] != -1 and not in_span:
            start = i
            in_span = True
    token_spans.append((remap[start], remap[-1]))

    # sanity check
    if len(token_spans) != len(context_toks):
        print(token_spans)
        print(context_toks)
        print(len(token_spans), len(context_toks))
        for i in range(len(context_toks)):
            if token_spans[i][1] - token_spans[i][0] + 1 != len(context_toks[i]):
                print(context_toks[i])
                print(token_spans[i])
                print(i)
    assert(len(token_spans) == len(context_toks))
    # make sure there is no -1 on boundary
    assert(sum([-1 in m[s[0]:s[1]+1] for s in token_spans]) == 0)
    return token_spans


def map_answer_idx(context, context_toks, m, char_idx1, char_idx2):
    context_tok_seq = ' '.join(context_toks)
    #m = remap_char_idx(context, context_toks)
    new_char_idx1 = m[char_idx1]
    new_char_idx2 = m[char_idx2]
    # count number of spaces
    tok_idx1 = context_tok_seq[new_char_idx1::-1].count(' ')
    tok_idx2 = context_tok_seq[new_char_idx2::-1].count(' ')
    # sanity check
    assert(tok_idx1 < len(context_toks))
    assert(tok_idx2 < len(context_toks))
    # NOTE, ending index is inclusive
    return (tok_idx1, tok_idx2)


def check_span_overlap(span1, span2):
    return (span1[0] <= span2[0] and span2[0] <= span1[1]) or \
        (span1[0] <= span2[1] and span2[1] <= span1[1]) or \
        (span2[0] <= span1[0] and span1[0] <= span2[1]) or \
        (span2[0] <= span1[1] and span1[1] <= span2[1])


def filter_by(keys, tokens, pos, token_span, ans_char_span, ans_tok_span):
    rs_tokens = []
    rs_pos = []
    rs_token_span = []
    rs_ans_span = [ans_tok_span[0], ans_tok_span[1]]
    for i, (tok, tag, tok_span) in enumerate(zip(tokens, pos, token_span)):
        #if tag in keys and sum([check_span_overlap(tok_span, a) for a in ans_char_span]) == 0:
        if tag in keys:
            if i < ans_tok_span[0]:
                rs_ans_span[0] -= 1
            if i <= ans_tok_span[1]:
                rs_ans_span[1] -= 1
            rs_ans_span[0] = 0 if rs_ans_span[0] == -1 else rs_ans_span[0]
            rs_ans_span[1] = 0 if rs_ans_span[1] == -1 else rs_ans_span[1]
            # few cases where the start idx becomes greater than end idx
            if rs_ans_span[0] > rs_ans_span[1]:
                # in this case, the answer is probably not good
                rs_ans_span[0] = rs_ans_span[1]
        else:
            rs_tokens.append(tok)
            rs_pos.append(tag)
            rs_token_span.append((tok_span))
    assert(rs_ans_span[0] < len(rs_tokens))
    assert(rs_ans_span[1] < len(rs_tokens))
    return rs_tokens, rs_pos, rs_token_span, rs_ans_span


def extract(opt, json_file):
    all_raw_context = []
    all_context = []
    all_context_sents = []
    all_context_pos = []
    all_query = []
    all_query_pos = []
    all_span = []
    all_token_spans = []  # only for context
    all_raw_ans = []
    context_max_sent_num = 0
    max_sent_l = 0
    with open(json_file, 'r') as f:
        f_str = f.read()
        j_obj = ujson.loads(f_str)
        data = j_obj['data']
        for article in data:
            title = article['title']
            pars = article['paragraphs']
            for p in pars:
                context = p['context']
                qas = p['qas']

                # tokenize; there are a few cases with multiple paras, take
                # them as a single para
                context = context.replace('\n', ' ')
                context_sent_toks, context_sent_pos = tokenize_spacy(context, split_sent=True, tag_type=opt.tag_type)
                packed = [extra_split_with_pos(sent, pos_sent) for sent, pos_sent in zip(context_sent_toks, context_sent_pos)]
                context_sent_toks = [p[0] for p in packed]
                context_sent_toks = [rephrase_quote(s) for s in context_sent_toks]
                context_toks = [t for s in context_sent_toks for t in s]
                context_sent_pos = [p[1] for p in packed]
                context_pos = [pos for s in context_sent_pos for pos in s]
                assert(len(context_toks) == len(context_pos))

                # get the token spans (token to original char span)
                # span end idx is inclusive
                char_remap = remap_char_idx(context, context_toks)
                token_spans = remap_token_span(char_remap, context, context_toks)

                max_sent_l = max(max_sent_l, len(context_toks))

                for qa in qas:
                    query = qa['question']
                    ans = qa['answers']

                    # tokenize
                    query_toks, query_pos = tokenize_spacy(query, split_sent=False, tag_type=opt.tag_type)
                    query_toks, query_pos = extra_split_with_pos(query_toks, query_pos)
                    query_toks = rephrase_quote(query_toks)
                    assert(len(query_toks) == len(query_pos))
                    max_sent_l = max(max_sent_l, len(query_toks))

                    answer_orig_spans = []
                    for a in ans:
                        a_txt = a['text']
                        idx1 = a['answer_start']
                        idx2 = idx1 + len(a_txt) - 1  # end idx is inclusive
                        answer_orig_spans.append((idx1, idx2))
                    orig_maj_span = get_gold(answer_orig_spans)[0]

                    # map orig char idx to tokenized word idx
                    tok_idx1, tok_idx2 = map_answer_idx(context, context_toks, char_remap, orig_maj_span[0], orig_maj_span[1])

                    orig_answer = context[orig_maj_span[0]:orig_maj_span[1]+1]
                    all_orig_answers = [context[orig_span[0]:orig_span[1]+1] for orig_span in answer_orig_spans]
                    matched_answer = context_toks[tok_idx1:tok_idx2+1]
                    recovered_answer = context[token_spans[tok_idx1][0]:token_spans[tok_idx2][1]+1]
                    print(orig_maj_span, (tok_idx1, tok_idx2), orig_answer, matched_answer, recovered_answer)

                    # sanity check
                    # make sure recovered token is a superset of ground truth
                    # (some gold answers are partial token)
                    if orig_answer not in recovered_answer:
                        print(context)
                        print(orig_answer)
                        print(token_spans)
                        print(tok_idx1, tok_idx2)
                        assert(False)

                    # concat sent tokens with sentence delimiter
                    context_toks_separated = []
                    for s in context_sent_toks:
                        context_toks_separated.extend(s + ['|||'])

                    # TODO, add option to filter pos in query as well
                    if opt.filter != '':
                        filters = opt.filter.split(',')
                        filtered_context_toks, filtered_context_pos, filtered_span, filtered_ans_tok_idx = filter_by(
                            filters, context_toks, context_pos, token_spans, [orig_maj_span], (tok_idx1, tok_idx2))
                        filtered_ans = filtered_context_toks[filtered_ans_tok_idx[0]:filtered_ans_tok_idx[1]+1]
                        if matched_answer != filtered_ans:
                            print('chopped answer: {0}, {1}'.format(matched_answer, filtered_ans))

                        # TODO, context_toks_separated is not filtered unfortunately...
                        all_raw_context.append(context.rstrip())
                        all_context.append(' '.join(filtered_context_toks))
                        all_context_sents.append(' '.join(context_toks_separated))
                        all_context_pos.append(' '.join(filtered_context_pos))
                        all_query.append(' '.join(query_toks))
                        all_query_pos.append(' '.join(query_pos))
                        all_span.append(filtered_ans_tok_idx)
                        all_token_spans.append(filtered_span)
                        all_raw_ans.append('|||'.join(all_orig_answers))
                    else:
                        # add to final list
                        all_raw_context.append(context.rstrip())
                        all_context.append(' '.join(context_toks))
                        all_context_sents.append(' '.join(context_toks_separated))
                        all_context_pos.append(' '.join(context_pos))
                        all_query.append(' '.join(query_toks))
                        all_query_pos.append(' '.join(query_pos))
                        all_span.append((tok_idx1, tok_idx2))
                        all_token_spans.append(token_spans)
                        all_raw_ans.append('|||'.join(all_orig_answers))

    print('max sent len: {0}'.format(max_sent_l))
    return (all_raw_context, all_context_sents, all_context, all_query, all_span,
            all_raw_ans, all_token_spans, all_context_pos, all_query_pos)


parser = argparse.ArgumentParser(description=__doc__,
                                 formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--dir', help="Path to the data dir", default="data/squad-v1.1/")
parser.add_argument('--data', help="Path to SQUAD json file", default="dev-v1.1.json")
parser.add_argument('--output', help="Prefix to the path of output", default="dev")
parser.add_argument('--filter', help="List of pos tags to filter out", default="")
parser.add_argument('--tag_type', help="The type of pos tag, universal/ptb", default="universal")


def main(args):
    opt = parser.parse_args(args)
    # append path
    opt.data = opt.dir + opt.data
    opt.output = opt.dir + opt.output

    raw_context, context_sents, context, query, span, raw_ans, token_spans, context_pos, query_pos = extract(opt, opt.data)
    print('{0} examples processed.'.format(len(context)))
    assert(len(raw_context) == len(context_sents))
    assert(len(query) == len(context_sents))
    assert(len(span) == len(context_sents))
    assert(len(raw_ans) == len(context_sents))
    assert(len(token_spans) == len(context_sents))
    assert(len(context) == len(context_sents))

    write_to(raw_context, opt.output + '.raw_context.txt')
    write_to(context, opt.output + '.context.txt')
    write_to(context_sents, opt.output + '.context_sent.txt')
    write_to(context_pos, opt.output + '.context_pos.txt')
    write_to(query, opt.output + '.raw_query.txt')
    write_to(query, opt.output + '.query.txt')
    write_to(query_pos, opt.output + '.query_pos.txt')
    write_to(raw_ans, opt.output + '.raw_answer.txt')
    span = ['{0} {1}'.format(p[0], p[1]) for p in span]
    write_to(span, opt.output + '.span.txt')
    token_span_ls = []
    for tok in token_spans:
        token_span_ls.append(' '.join(['{0}:{1}'.format(s, e) for (s, e) in tok]))
    write_to(token_span_ls, opt.output + '.token_span.txt')


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))
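# Standalone toy illustration (not from the source file): how the char->token
# remapping above behaves. remap_char_idx aligns the original context with
# ' '.join(tokens) character by character; map_answer_idx then turns an answer
# char span into token indices by counting spaces left of the mapped
# positions. Run in the same module as the functions above.
context = 'It costs $5.'
context_toks = ['It', 'costs', '$', '5', '.']
m = remap_char_idx(context, context_toks)
print(map_answer_idx(context, context_toks, m, 9, 10))  # chars "$5" -> tokens (2, 3)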
# Generated by Django 3.2 on 2021-04-15 19:07

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('api', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='lesson',
            options={'ordering': ['day', 'timetable']},
        ),
        migrations.RenameField(
            model_name='cafedra',
            old_name='cafedra_name',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='faculty',
            old_name='faculty_name',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='group',
            old_name='group_name',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='lessontype',
            old_name='type_name',
            new_name='name',
        ),
        migrations.RenameField(
            model_name='position',
            old_name='pos_name',
            new_name='name',
        ),
    ]
import numpy as np
import pickle
import matplotlib.pyplot as plt
import matplotlib.image as mpimg

selectors = pickle.load(open('l2_l1_reg/020', 'rb'))

# we throw out the biases because they're not important for pruning
weights = [s for s in selectors if len(s.shape) == 4]
weights = [w.reshape((w.shape[0], w.shape[1])) for w in weights]
weights = [np.abs(w) for w in weights]
# weights = [w/np.std(w) for w in weights]

# pad every layer's matrix to a common width so they can be stacked
max_width = 600
weights = [np.pad(w, ((0, 0), (0, max_width - w.shape[1])), 'constant', constant_values=-0.01)
           for w in weights]
weights = np.concatenate(weights, axis=0)
print(weights.shape)

imgplot = plt.imshow(weights)
plt.show()
#!/usr/bin/env python
# -*- coding: UTF-8 -*-

import random
import numpy as np
from activators import SigmoidActivator, IdentityActivator


# Fully connected layer
class FullConnectedLayer(object):
    def __init__(self, input_size, output_size, activator):
        '''
        Constructor
        input_size: dimension of this layer's input vector
        output_size: dimension of this layer's output vector
        activator: activation function
        '''
        self.input_size = input_size
        self.output_size = output_size
        self.activator = activator
        # weight matrix W
        self.W = np.random.uniform(-0.1, 0.1, (output_size, input_size))
        # bias vector b
        self.b = np.zeros((output_size, 1))
        # output vector
        self.output = np.zeros((output_size, 1))

    def forward(self, input_array):
        '''
        Forward pass
        input_array: input vector, its dimension must equal input_size
        '''
        # equation 2
        self.input = input_array
        self.output = self.activator.forward(
            np.dot(self.W, input_array) + self.b)

    def backward(self, delta_array):
        '''
        Backward pass: compute the gradients of W and b
        delta_array: error term propagated from the layer above
        '''
        # equation 8
        self.delta = self.activator.backward(self.input) * np.dot(
            self.W.T, delta_array)
        self.W_grad = np.dot(delta_array, self.input.T)
        self.b_grad = delta_array

    def update(self, learning_rate):
        '''
        Update the weights with gradient descent
        '''
        self.W += learning_rate * self.W_grad
        self.b += learning_rate * self.b_grad

    def dump(self):
        print 'W: %s\nb:%s' % (self.W, self.b)


# Neural network
class Network(object):
    def __init__(self, layers):
        '''
        Constructor
        '''
        self.layers = []
        for i in range(len(layers) - 1):
            self.layers.append(
                FullConnectedLayer(
                    layers[i], layers[i+1],
                    SigmoidActivator()
                )
            )

    def predict(self, sample):
        '''
        Predict with the network
        sample: input sample
        '''
        output = sample
        for layer in self.layers:
            layer.forward(output)
            output = layer.output
        return output

    def train(self, labels, data_set, rate, epoch):
        '''
        Training entry point
        labels: sample labels
        data_set: input samples
        rate: learning rate
        epoch: number of training epochs
        '''
        for i in range(epoch):
            for d in range(len(data_set)):
                self.train_one_sample(labels[d], data_set[d], rate)

    def train_one_sample(self, label, sample, rate):
        self.predict(sample)
        self.calc_gradient(label)
        self.update_weight(rate)

    def calc_gradient(self, label):
        delta = self.layers[-1].activator.backward(
            self.layers[-1].output
        ) * (label - self.layers[-1].output)
        for layer in self.layers[::-1]:
            layer.backward(delta)
            delta = layer.delta
        return delta

    def update_weight(self, rate):
        for layer in self.layers:
            layer.update(rate)

    def dump(self):
        for layer in self.layers:
            layer.dump()

    def loss(self, output, label):
        return 0.5 * ((label - output) * (label - output)).sum()

    def gradient_check(self, sample_feature, sample_label):
        '''
        Gradient check
        sample_feature: feature of the sample
        sample_label: label of the sample
        '''
        # get the gradient of every connection for the current sample
        self.predict(sample_feature)
        self.calc_gradient(sample_label)

        # check the gradients numerically
        epsilon = 10e-4
        for fc in self.layers:
            for i in range(fc.W.shape[0]):
                for j in range(fc.W.shape[1]):
                    fc.W[i,j] += epsilon
                    output = self.predict(sample_feature)
                    err1 = self.loss(sample_label, output)
                    fc.W[i,j] -= 2*epsilon
                    output = self.predict(sample_feature)
                    err2 = self.loss(sample_label, output)
                    expect_grad = (err1 - err2) / (2 * epsilon)
                    fc.W[i,j] += epsilon
                    print 'weights(%d,%d): expected - actual %.4e - %.4e' % (
                        i, j, expect_grad, fc.W_grad[i,j])


from bp import train_data_set


def transpose(args):
    return map(
        lambda arg: map(
            lambda line: np.array(line).reshape(len(line), 1),
            arg),
        args
    )


class Normalizer(object):
    def __init__(self):
        self.mask = [
            0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80
        ]

    def norm(self, number):
        data = map(lambda m: 0.9 if number & m else 0.1, self.mask)
        return np.array(data).reshape(8, 1)

    def denorm(self, vec):
        binary = map(lambda i: 1 if i > 0.5 else 0, vec[:,0])
        for i in range(len(self.mask)):
            binary[i] = binary[i] * self.mask[i]
        return reduce(lambda x, y: x + y, binary)


def train_data_set():
    normalizer = Normalizer()
    data_set = []
    labels = []
    for i in range(0, 256):
        n = normalizer.norm(i)
        data_set.append(n)
        labels.append(n)
    return labels, data_set


def correct_ratio(network):
    normalizer = Normalizer()
    correct = 0.0
    for i in range(256):
        if normalizer.denorm(network.predict(normalizer.norm(i))) == i:
            correct += 1.0
    print 'correct_ratio: %.2f%%' % (correct / 256 * 100)


def test():
    labels, data_set = transpose(train_data_set())
    net = Network([8, 3, 8])
    rate = 0.5
    mini_batch = 20
    epoch = 10
    for i in range(epoch):
        net.train(labels, data_set, rate, mini_batch)
        print 'after epoch %d loss: %f' % (
            (i + 1),
            net.loss(labels[-1], net.predict(data_set[-1]))
        )
        rate /= 2
    correct_ratio(net)


def gradient_check():
    '''
    Gradient check entry point
    '''
    labels, data_set = transpose(train_data_set())
    net = Network([8, 3, 8])
    net.gradient_check(data_set[0], labels[0])
    return net
""" Implementations for `Provisioning` classes Copyright 2015 BlazeMeter Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import datetime import sys import time import traceback from bzt import ToolError from bzt.engine import Provisioning, SelfDiagnosable from bzt.six import reraise from bzt.utils import dehumanize_time class Local(Provisioning): """ Local provisioning means we start all the tools locally """ def __init__(self): super(Local, self).__init__() self.extend_configs = True self.start_time = None self.available_slots = None self.finished_modules = [] self.started_modules = [] def _get_start_shift(self, shift): if not shift: return 0 time_formats = ['%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M', '%H:%M:%S', '%H:%M'] for time_format in time_formats: try: date = datetime.datetime.strptime(shift, time_format) except ValueError: continue except TypeError: self.log.warning('Start time must be string type ("%s"), ignored "%s"', time_format[0], shift) break today = datetime.date.today() if today > date.date(): date = datetime.datetime(today.year, today.month, today.day, date.hour, date.minute, date.second) return time.mktime(date.timetuple()) - self.start_time else: self.log.warning('Unrecognized time format: %s ("%s" required), ignored', shift, time_formats[0]) return 0 def prepare(self): super(Local, self).prepare() for executor in self.executors: self.log.debug("Preparing executor: %s", executor) executor.prepare() def startup(self): self.start_time = time.time() if self.settings.get("sequential", False): self.available_slots = 1 else: self.available_slots = self.settings.get("capacity", None) if not self.available_slots: self.available_slots = sys.maxsize # no limit for executor in self.executors: start_at = executor.execution.get('start-at', 0) start_shift = self._get_start_shift(start_at) delay = dehumanize_time(executor.execution.get('delay', 0)) executor.delay = delay + start_shift msg = "Delay setup for %s: %s(start-at) + %s(delay) = %s" self.log.debug(msg, executor, start_shift, delay, executor.delay) def _start_modules(self): if self.available_slots: non_started_executors = [e for e in self.executors if e not in self.started_modules] for executor in non_started_executors: self.engine.logging_level_up() if time.time() >= self.start_time + executor.delay: executor.startup() self.started_modules.append(executor) self.available_slots -= 1 msg = "Starting execution: %s, rest of available slots: %s" self.log.debug(msg, executor, self.available_slots) if not self.available_slots: break self.engine.logging_level_down() def check(self): """ Check executors for finish. Return True if all of them has finished. 
""" finished = True self._start_modules() for executor in self.executors: if executor in self.finished_modules: continue if executor not in self.started_modules: finished = False continue if executor.check(): self.finished_modules.append(executor) self.available_slots += 1 self.log.debug("%s finished", executor) else: finished = False return finished def shutdown(self): """ Call shutdown on executors """ exc_info = exc_value = None for executor in self.started_modules: self.log.debug("Shutdown %s", executor) try: executor.shutdown() except BaseException as exc: msg = "Exception in shutdown of %s: %s %s" self.log.debug(msg, executor.__class__.__name__, exc, traceback.format_exc()) if not exc_info: exc_info = sys.exc_info() if not exc_value: exc_value = exc if exc_info: reraise(exc_info, exc_value) def post_process(self): """ Post-process executors """ exc_info = exc_value = None for executor in self.executors: self.log.debug("Post-process %s", executor) try: executor.post_process() if executor in self.started_modules and not executor.has_results(): msg = "Empty results, most likely %s (%s) failed. " \ "Actual reason for this can be found in logs under %s" message = msg % (executor.label, executor.__class__.__name__, self.engine.artifacts_dir) diagnostics = None if isinstance(executor, SelfDiagnosable): diagnostics = executor.get_error_diagnostics() raise ToolError(message, diagnostics) except BaseException as exc: msg = "Exception in post_process of %s: %s %s" self.log.debug(msg, executor.__class__.__name__, exc, traceback.format_exc()) if not exc_info: exc_info = sys.exc_info() if not exc_value: exc_value = exc if exc_info: reraise(exc_info, exc_value)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import re
import sys
import time

import pytest
import requests

import ray


@pytest.mark.skipif(
    sys.version_info < (3, 5, 3), reason="requires python3.5.3 or higher")
def test_get_webui(shutdown_only):
    addresses = ray.init(include_webui=True, num_cpus=1)
    webui_url = addresses["webui_url"]
    assert ray.get_webui_url() == webui_url
    assert re.match(r"^(localhost|\d+\.\d+\.\d+\.\d+):8080$", webui_url)

    start_time = time.time()
    while True:
        try:
            node_info = requests.get("http://" + webui_url +
                                     "/api/node_info").json()
            break
        except requests.exceptions.ConnectionError:
            if time.time() > start_time + 30:
                raise Exception(
                    "Timed out while waiting for dashboard to start.")

    assert node_info["error"] is None
    assert node_info["result"] is not None
    assert isinstance(node_info["timestamp"], float)
from django.contrib import admin

# Register your models here.
from .models import Users


class UserAdmin(admin.ModelAdmin):
    list_display = ['id', 'username', 'age', 'phone', 'email']


admin.site.register(Users, UserAdmin)
""" AMPAREX Rest API Documentation This is the description of the AMPAREX Rest API. All REST calls plus the corresponding data model are described in this documentation. Direct calls to the server are possible over this page.&lt;br/&gt;Following steps are needed to use the API:&lt;br/&gt;&lt;br/&gt;1. Get the alias identifier of your login account from AMPAREX Software (Branch office administration) -&gt; Service accounts -&gt; your service account -&gt; copy alias token)&lt;br/&gt;2. Please use the login URL /alias/{alias}/login under section \"Login\" below with your credentials to get a valid bearer token.&lt;br/&gt;3. Copy bearer token from login response&lt;br/&gt;3. Then click \"Authorize\" on the top of this page&lt;br/&gt;4. Insert into the field \"value\": \"Bearer {Your Bearer token}\" (without {}) for example \"Bearer 334d34d3dgh5tz5h5h\"&lt;br/&gt;4. Click Authorize&lt;br/&gt;5. Bearer token will be automatically used in the header for every following API call.&lt;br/&gt;6. Now you are ready to use the API&lt;br/&gt;&lt;br/&gt;See also [documentation](https://manual.amparex.com/display/HAN/AMPAREX+API) for help&lt;br/&gt;&lt;br/&gt;Documentation of all the used fields and objects is at the bottom of this page called \"Models\" # noqa: E501 The version of the OpenAPI document: 1.0.0 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from amparex.model_utils import ( # noqa: F401 ApiTypeError, ModelComposed, ModelNormal, ModelSimple, cached_property, change_keys_js_to_python, convert_js_args_to_python_args, date, datetime, file_type, none_type, validate_get_composed_info, ) from ..model_utils import OpenApiModel from amparex.exceptions import ApiAttributeError def lazy_import(): from amparex.model.optional_mapstringstring import OptionalMapstringstring globals()['OptionalMapstringstring'] = OptionalMapstringstring class ContactLenseDetail(ModelNormal): """NOTE: This class is auto generated by OpenAPI Generator. Ref: https://openapi-generator.tech Do not edit the class manually. Attributes: allowed_values (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict with a capitalized key describing the allowed value and an allowed value. These dicts store the allowed enum values. attribute_map (dict): The key is attribute name and the value is json key in definition. discriminator_value_class_map (dict): A dict to go from the discriminator variable value to the discriminator class name. validations (dict): The key is the tuple path to the attribute and the for var_name this is (var_name,). The value is a dict that stores validations for max_length, min_length, max_items, min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum, inclusive_minimum, and regex. additional_properties_type (tuple): A tuple of classes accepted as additional properties values. """ allowed_values = { } validations = { } @cached_property def additional_properties_type(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded """ lazy_import() return (bool, date, datetime, dict, float, int, list, str, none_type,) # noqa: E501 _nullable = False @cached_property def openapi_types(): """ This must be a method because a model may have properties that are of type self, this must run after the class is loaded Returns openapi_types (dict): The key is attribute name and the value is attribute type. 
""" lazy_import() return { 'addition': (str,), # noqa: E501 'amount': (float,), # noqa: E501 'article_id': (str,), # noqa: E501 'article_name': (str,), # noqa: E501 'axis_cylinder': (float,), # noqa: E501 'color': (str,), # noqa: E501 'cylinder': (float,), # noqa: E501 'diameter': (float,), # noqa: E501 'excentricity': (float,), # noqa: E501 'id': (str,), # noqa: E501 'material': (str,), # noqa: E501 'properties': (OptionalMapstringstring,), # noqa: E501 'radius_basecurve': (float,), # noqa: E501 'sales_price': (float,), # noqa: E501 'sphere': (float,), # noqa: E501 'uid_manufacturer': (str,), # noqa: E501 } @cached_property def discriminator(): return None attribute_map = { 'addition': 'addition', # noqa: E501 'amount': 'amount', # noqa: E501 'article_id': 'articleId', # noqa: E501 'article_name': 'articleName', # noqa: E501 'axis_cylinder': 'axisCylinder', # noqa: E501 'color': 'color', # noqa: E501 'cylinder': 'cylinder', # noqa: E501 'diameter': 'diameter', # noqa: E501 'excentricity': 'excentricity', # noqa: E501 'id': 'id', # noqa: E501 'material': 'material', # noqa: E501 'properties': 'properties', # noqa: E501 'radius_basecurve': 'radiusBasecurve', # noqa: E501 'sales_price': 'salesPrice', # noqa: E501 'sphere': 'sphere', # noqa: E501 'uid_manufacturer': 'uidManufacturer', # noqa: E501 } read_only_vars = { } _composed_schemas = {} @classmethod @convert_js_args_to_python_args def _from_openapi_data(cls, *args, **kwargs): # noqa: E501 """ContactLenseDetail - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) addition (str): Addition. [optional] # noqa: E501 amount (float): Amount. [optional] # noqa: E501 article_id (str): The ID of the article. [optional] # noqa: E501 article_name (str): Articles name. This is only used as a fallback if the article can not be matched by its articleID or it UID of the manufacturer. Otherwise it will be ignored.. [optional] # noqa: E501 axis_cylinder (float): Axis of the cylinder. [optional] # noqa: E501 color (str): Color. [optional] # noqa: E501 cylinder (float): Cylinder. [optional] # noqa: E501 diameter (float): Diameter. [optional] # noqa: E501 excentricity (float): Excentricity. 
[optional] # noqa: E501 id (str): [optional] # noqa: E501 material (str): Material. [optional] # noqa: E501 properties (OptionalMapstringstring): [optional] # noqa: E501 radius_basecurve (float): Basecurve. [optional] # noqa: E501 sales_price (float): Sales price. [optional] # noqa: E501 sphere (float): Sphere. [optional] # noqa: E501 uid_manufacturer (str): If the articleId is not specified the article is searched by its manufacturer UID. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) self = super(OpenApiModel, cls).__new__(cls) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) return self required_properties = set([ '_data_store', '_check_type', '_spec_property_naming', '_path_to_item', '_configuration', '_visited_composed_classes', ]) @convert_js_args_to_python_args def __init__(self, *args, **kwargs): # noqa: E501 """ContactLenseDetail - a model defined in OpenAPI Keyword Args: _check_type (bool): if True, values for parameters in openapi_types will be type checked and a TypeError will be raised if the wrong type is input. Defaults to True _path_to_item (tuple/list): This is a list of keys or values to drill down to the model in received_data when deserializing a response _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _configuration (Configuration): the instance to use when deserializing a file_type parameter. If passed, type conversion is attempted If omitted no type conversion is done. _visited_composed_classes (tuple): This stores a tuple of classes that we have traveled through so that if we see that class again we will not use its discriminator again. When traveling through a discriminator, the composed schema that is is traveled through is added to this set. For example if Animal has a discriminator petType and we pass in "Dog", and the class Dog allOf includes Animal, we move through Animal once using the discriminator, and pick Dog. Then in Dog, we will make an instance of the Animal class but this time we won't travel through its discriminator because we passed in _visited_composed_classes = (Animal,) addition (str): Addition. [optional] # noqa: E501 amount (float): Amount. [optional] # noqa: E501 article_id (str): The ID of the article. [optional] # noqa: E501 article_name (str): Articles name. This is only used as a fallback if the article can not be matched by its articleID or it UID of the manufacturer. Otherwise it will be ignored.. 
[optional] # noqa: E501 axis_cylinder (float): Axis of the cylinder. [optional] # noqa: E501 color (str): Color. [optional] # noqa: E501 cylinder (float): Cylinder. [optional] # noqa: E501 diameter (float): Diameter. [optional] # noqa: E501 excentricity (float): Excentricity. [optional] # noqa: E501 id (str): [optional] # noqa: E501 material (str): Material. [optional] # noqa: E501 properties (OptionalMapstringstring): [optional] # noqa: E501 radius_basecurve (float): Basecurve. [optional] # noqa: E501 sales_price (float): Sales price. [optional] # noqa: E501 sphere (float): Sphere. [optional] # noqa: E501 uid_manufacturer (str): If the articleId is not specified the article is searched by its manufacturer UID. [optional] # noqa: E501 """ _check_type = kwargs.pop('_check_type', True) _spec_property_naming = kwargs.pop('_spec_property_naming', False) _path_to_item = kwargs.pop('_path_to_item', ()) _configuration = kwargs.pop('_configuration', None) _visited_composed_classes = kwargs.pop('_visited_composed_classes', ()) if args: raise ApiTypeError( "Invalid positional arguments=%s passed to %s. Remove those invalid positional arguments." % ( args, self.__class__.__name__, ), path_to_item=_path_to_item, valid_classes=(self.__class__,), ) self._data_store = {} self._check_type = _check_type self._spec_property_naming = _spec_property_naming self._path_to_item = _path_to_item self._configuration = _configuration self._visited_composed_classes = _visited_composed_classes + (self.__class__,) for var_name, var_value in kwargs.items(): if var_name not in self.attribute_map and \ self._configuration is not None and \ self._configuration.discard_unknown_keys and \ self.additional_properties_type is None: # discard variable. continue setattr(self, var_name, var_value) if var_name in self.read_only_vars: raise ApiAttributeError(f"`{var_name}` is a read-only attribute. Use `from_openapi_data` to instantiate " f"class with read only attributes.")
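# Illustrative usage only, not part of the generated module: instantiate the
# model with pythonic attribute names; attribute_map above maps them back to
# the wire names (articleId, axisCylinder, ...) during serialization. All
# values here are made up.
lense = ContactLenseDetail(
    article_id="A-123",
    sphere=-1.25,
    cylinder=-0.5,
    axis_cylinder=180.0,
)
print(lense.sphere)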
# Copyright 2021 Dynatrace LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import ssl
import time
import urllib.error
import urllib.request

from util.context import Context
from util.logging import log_multiline_message

TIMEOUT_SEC = 10


def perform_http_request_for_json(url, encoded_body_bytes, method, headers, verify_SSL: bool, context: Context):
    start_time = time.time()
    print(f"Performing {method} call for URL {url}")

    ssl_context = ssl.create_default_context()
    ssl_context.check_hostname = False
    if verify_SSL:
        ssl_context.verify_mode = ssl.CERT_REQUIRED
    else:
        ssl_context.verify_mode = ssl.CERT_NONE

    context.sfm.request_sent()
    req = urllib.request.Request(
        url,
        encoded_body_bytes,
        headers,
        method=method
    )

    try:
        with urllib.request.urlopen(req, context=ssl_context, timeout=TIMEOUT_SEC) as response:
            status = response.code
            body = response.read().decode("utf-8")
    except urllib.error.HTTPError as e:
        status = e.code
        body = e.read().decode("utf-8")
    except Exception as e:
        context.sfm.issue("request_failed_without_status_code")
        raise e

    # measure the whole call, including the request itself
    duration_sec = time.time() - start_time
    duration_ms = round(duration_sec * 1000, 2)

    context.sfm.request_finished_with_status_code(status, duration_ms)
    log_multiline_message(f"Response: call duration {duration_ms}ms, status code {status}, body '{body}'")

    return status, body
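# Minimal usage sketch, not from the source: the Context type above carries an
# `sfm` self-monitoring object; this stub implements only the hooks this
# module calls so the example stays self-contained. URL, payload, and token
# are made up.
import json


class _StubSFM:
    def request_sent(self): pass
    def request_finished_with_status_code(self, status, duration_ms): pass
    def issue(self, what): pass


class _StubContext:
    sfm = _StubSFM()


payload = json.dumps({"ping": True}).encode("utf-8")
status, body = perform_http_request_for_json(
    "https://example.com/api/v2/logs/ingest",
    payload,
    "POST",
    {"Content-Type": "application/json", "Authorization": "Api-Token <token>"},
    verify_SSL=True,
    context=_StubContext(),
)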
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import argparse
import os

from bs4 import BeautifulSoup

js_scripts = """
<script type="text/javascript" id="documentation_options" data-url_root="./" src="/js/documentation_options.js"></script>
<script type="text/javascript" src="/js/jquery.js"></script>
<script type="text/javascript" src="/js/underscore.js"></script>
<script type="text/javascript" src="/js/doctools.js"></script>
<script type="text/javascript" src="/js/language_data.js"></script>
<script type="text/javascript" src="/js/searchtools.js"></script>
"""  # noqa: E501

search_js_scripts = """
<script type="text/javascript">
  jQuery(function() { Search.loadIndex("/js/searchindex.js"); });
</script>

<script type="text/javascript" id="searchindexloader"></script>
"""


def parse_sphinx(input_dir, output_dir):
    for cur, _, files in os.walk(input_dir):
        for fname in files:
            if fname.endswith(".html"):
                with open(os.path.join(cur, fname), "r") as f:
                    soup = BeautifulSoup(f.read(), "html.parser")
                doc = soup.find("div", {"class": "document"})
                wrapped_doc = doc.wrap(soup.new_tag("div", **{"class": "sphinx"}))
                # add js
                if fname == "search.html":
                    out = js_scripts + search_js_scripts + str(wrapped_doc)
                else:
                    out = js_scripts + str(wrapped_doc)
                output_path = os.path.join(output_dir, os.path.relpath(cur, input_dir))
                os.makedirs(output_path, exist_ok=True)
                with open(os.path.join(output_path, fname), "w") as fout:
                    fout.write(out)

    # update reference in JS file
    with open(os.path.join(input_dir, "_static/searchtools.js"), "r") as js_file:
        js = js_file.read()

    js = js.replace(
        "DOCUMENTATION_OPTIONS.URL_ROOT + '_sources/'", "'_sphinx-sources/'"
    )

    with open(os.path.join(input_dir, "_static/searchtools.js"), "w") as js_file:
        js_file.write(js)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Strip HTML body from Sphinx docs.")
    parser.add_argument(
        "-i",
        "--input_dir",
        metavar="path",
        required=True,
        help="Input directory for Sphinx HTML.",
    )
    parser.add_argument(
        "-o",
        "--output_dir",
        metavar="path",
        required=True,
        help="Output directory in Docusaurus.",
    )
    args = parser.parse_args()
    parse_sphinx(args.input_dir, args.output_dir)
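# Tiny standalone demo (not from the source) of the wrap step above: the
# Sphinx "document" div is enclosed in a <div class="sphinx"> container,
# exactly as parse_sphinx does for every page.
from bs4 import BeautifulSoup

soup = BeautifulSoup('<div class="document">hi</div>', "html.parser")
doc = soup.find("div", {"class": "document"})
print(doc.wrap(soup.new_tag("div", **{"class": "sphinx"})))
# -> <div class="sphinx"><div class="document">hi</div></div>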
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import *  # NOQA
from future import standard_library
standard_library.install_aliases()  # NOQA

import copy
from logging import getLogger

import chainer
from chainer import functions as F
import numpy as np

from chainerrl import agent
from chainerrl.misc import async
from chainerrl.misc.batch_states import batch_states
from chainerrl.misc import copy_param
from chainerrl.recurrent import Recurrent
from chainerrl.recurrent import RecurrentChainMixin
from chainerrl.recurrent import state_kept

logger = getLogger(__name__)


class A3CModel(chainer.Link):
    """A3C model."""

    def pi_and_v(self, obs):
        """Evaluate the policy and the V-function.

        Args:
            obs (Variable or ndarray): Batched observations.

        Returns:
            Distribution and Variable
        """
        raise NotImplementedError()

    def __call__(self, obs):
        return self.pi_and_v(obs)


class A3CSeparateModel(chainer.Chain, A3CModel, RecurrentChainMixin):
    """A3C model that consists of a separate policy and V-function.

    Args:
        pi (Policy): Policy.
        v (VFunction): V-function.
    """

    def __init__(self, pi, v):
        super().__init__(pi=pi, v=v)

    def pi_and_v(self, obs):
        pout = self.pi(obs)
        vout = self.v(obs)
        return pout, vout


class A3CSharedModel(chainer.Chain, A3CModel, RecurrentChainMixin):
    """A3C model where the policy and V-function share parameters.

    Args:
        shared (Link): Shared part. Nonlinearity must be included in it.
        pi (Policy): Policy that receives output of shared as input.
        v (VFunction): V-function that receives output of shared as input.
    """

    def __init__(self, shared, pi, v):
        super().__init__(shared=shared, pi=pi, v=v)

    def pi_and_v(self, obs):
        h = self.shared(obs)
        pout = self.pi(h)
        vout = self.v(h)
        return pout, vout


class A3C(agent.AttributeSavingMixin, agent.AsyncAgent):
    """A3C: Asynchronous Advantage Actor-Critic.

    See http://arxiv.org/abs/1602.01783

    Args:
        model (A3CModel): Model to train
        optimizer (chainer.Optimizer): optimizer used to train the model
        t_max (int): The model is updated after every t_max local steps
        gamma (float): Discount factor [0,1]
        beta (float): Weight coefficient for the entropy regularization term.
        process_idx (int): Index of the process.
        phi (callable): Feature extractor function
        pi_loss_coef (float): Weight coefficient for the loss of the policy
        v_loss_coef (float): Weight coefficient for the loss of the value
            function
        act_deterministically (bool): If set true, choose most probable
            actions in act method.
        batch_states (callable): method which makes a batch of observations.
            default is `chainerrl.misc.batch_states.batch_states`
    """

    process_idx = None
    saved_attributes = ['model', 'optimizer']

    def __init__(self, model, optimizer, t_max, gamma, beta=1e-2,
                 process_idx=0, phi=lambda x: x,
                 pi_loss_coef=1.0, v_loss_coef=0.5,
                 keep_loss_scale_same=False,
                 normalize_grad_by_t_max=False,
                 use_average_reward=False, average_reward_tau=1e-2,
                 act_deterministically=False,
                 average_entropy_decay=0.999,
                 average_value_decay=0.999,
                 batch_states=batch_states):

        assert isinstance(model, A3CModel)
        # Globally shared model
        self.shared_model = model

        # Thread specific model
        self.model = copy.deepcopy(self.shared_model)
        async.assert_params_not_shared(self.shared_model, self.model)

        self.optimizer = optimizer

        self.t_max = t_max
        self.gamma = gamma
        self.beta = beta
        self.phi = phi
        self.pi_loss_coef = pi_loss_coef
        self.v_loss_coef = v_loss_coef
        self.keep_loss_scale_same = keep_loss_scale_same
        self.normalize_grad_by_t_max = normalize_grad_by_t_max
        self.use_average_reward = use_average_reward
        self.average_reward_tau = average_reward_tau
        self.act_deterministically = act_deterministically
        self.average_value_decay = average_value_decay
        self.average_entropy_decay = average_entropy_decay
        self.batch_states = batch_states

        self.t = 0
        self.t_start = 0
        self.past_action_log_prob = {}
        self.past_action_entropy = {}
        self.past_states = {}
        self.past_rewards = {}
        self.past_values = {}
        self.average_reward = 0
        # A3C won't use an explorer, but this attribute is referenced by
        # run_dqn
        self.explorer = None

        # Stats
        self.average_value = 0
        self.average_entropy = 0

    def sync_parameters(self):
        copy_param.copy_param(target_link=self.model,
                              source_link=self.shared_model)

    @property
    def shared_attributes(self):
        return ('shared_model', 'optimizer')

    def update(self, statevar):
        assert self.t_start < self.t

        if statevar is None:
            R = 0
        else:
            with state_kept(self.model):
                _, vout = self.model.pi_and_v(statevar)
            R = float(vout.data)

        pi_loss = 0
        v_loss = 0
        for i in reversed(range(self.t_start, self.t)):
            R *= self.gamma
            R += self.past_rewards[i]
            if self.use_average_reward:
                R -= self.average_reward
            v = self.past_values[i]
            advantage = R - v
            if self.use_average_reward:
                self.average_reward += self.average_reward_tau * \
                    float(advantage.data)
            # Accumulate gradients of policy
            log_prob = self.past_action_log_prob[i]
            entropy = self.past_action_entropy[i]

            # Log probability is increased proportionally to advantage
            pi_loss -= log_prob * float(advantage.data)
            # Entropy is maximized
            pi_loss -= self.beta * entropy
            # Accumulate gradients of value function
            v_loss += (v - R) ** 2 / 2

        if self.pi_loss_coef != 1.0:
            pi_loss *= self.pi_loss_coef

        if self.v_loss_coef != 1.0:
            v_loss *= self.v_loss_coef

        # Normalize the loss of sequences truncated by terminal states
        if self.keep_loss_scale_same and \
                self.t - self.t_start < self.t_max:
            factor = self.t_max / (self.t - self.t_start)
            pi_loss *= factor
            v_loss *= factor

        if self.normalize_grad_by_t_max:
            pi_loss /= self.t - self.t_start
            v_loss /= self.t - self.t_start

        if self.process_idx == 0:
            logger.debug('pi_loss:%s v_loss:%s', pi_loss.data, v_loss.data)

        total_loss = pi_loss + F.reshape(v_loss, pi_loss.data.shape)

        # Compute gradients using thread-specific model
        self.model.zerograds()
        total_loss.backward()
        # Copy the gradients to the globally shared model
        self.shared_model.zerograds()
        copy_param.copy_grad(
            target_link=self.shared_model, source_link=self.model)
        # Update the globally shared model
        if self.process_idx == 0:
            norm = sum(np.sum(np.square(param.grad))
                       for param in self.optimizer.target.params())
            logger.debug('grad norm:%s', norm)
        self.optimizer.update()
        if self.process_idx == 0:
            logger.debug('update')

        self.sync_parameters()
        if isinstance(self.model, Recurrent):
            self.model.unchain_backward()

        self.past_action_log_prob = {}
        self.past_action_entropy = {}
        self.past_states = {}
        self.past_rewards = {}
        self.past_values = {}

        self.t_start = self.t

    def act_and_train(self, obs, reward):
        statevar = self.batch_states([obs], np, self.phi)

        self.past_rewards[self.t - 1] = reward

        if self.t - self.t_start == self.t_max:
            self.update(statevar)

        self.past_states[self.t] = statevar
        pout, vout = self.model.pi_and_v(statevar)
        action = pout.sample().data  # Do not backprop through sampled actions
        self.past_action_log_prob[self.t] = pout.log_prob(action)
        self.past_action_entropy[self.t] = pout.entropy
        self.past_values[self.t] = vout
        self.t += 1
        action = action[0]
        if self.process_idx == 0:
            logger.debug('t:%s r:%s a:%s pout:%s',
                         self.t, reward, action, pout)
        # Update stats
        self.average_value += (
            (1 - self.average_value_decay) *
            (float(vout.data[0]) - self.average_value))
        self.average_entropy += (
            (1 - self.average_entropy_decay) *
            (float(pout.entropy.data[0]) - self.average_entropy))
        return action

    def act(self, obs):
        # Use the process-local model for acting
        with chainer.no_backprop_mode():
            statevar = self.batch_states([obs], np, self.phi)
            pout, _ = self.model.pi_and_v(statevar)
            if self.act_deterministically:
                return pout.most_probable.data[0]
            else:
                return pout.sample().data[0]

    def stop_episode_and_train(self, state, reward, done=False):
        self.past_rewards[self.t - 1] = reward
        if done:
            self.update(None)
        else:
            statevar = self.batch_states([state], np, self.phi)
            self.update(statevar)

        if isinstance(self.model, Recurrent):
            self.model.reset_state()

    def stop_episode(self):
        if isinstance(self.model, Recurrent):
            self.model.reset_state()

    def load(self, dirname):
        super().load(dirname)
        copy_param.copy_param(target_link=self.shared_model,
                              source_link=self.model)

    def get_statistics(self):
        return [
            ('average_value', self.average_value),
            ('average_entropy', self.average_entropy),
        ]
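# Minimal construction sketch (illustrative, not from the source): wiring a
# tiny policy/value pair into A3CSeparateModel and the A3C agent. TinyPi and
# TinyV are made-up stand-ins; real setups would use proper architectures and
# one agent per worker process.
import chainer
import chainer.links as L
from chainerrl.distribution import SoftmaxDistribution


class TinyPi(chainer.Chain):
    def __init__(self, obs_size, n_actions):
        super().__init__()
        with self.init_scope():
            self.fc = L.Linear(obs_size, n_actions)

    def __call__(self, obs):
        return SoftmaxDistribution(self.fc(obs))  # logits -> categorical


class TinyV(chainer.Chain):
    def __init__(self, obs_size):
        super().__init__()
        with self.init_scope():
            self.fc = L.Linear(obs_size, 1)

    def __call__(self, obs):
        return self.fc(obs)


model = A3CSeparateModel(pi=TinyPi(4, 2), v=TinyV(4))
opt = chainer.optimizers.Adam()
opt.setup(model)  # the optimizer targets the globally shared model
agent_ = A3C(model, opt, t_max=5, gamma=0.99)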
# encoding=utf8

"""Flower pollination algorithm module."""

import numpy as np
from scipy.special import gamma as Gamma

from WeOptPy.algorithms.interfaces import Algorithm
from WeOptPy.util import reflect_repair

__all__ = ['FlowerPollinationAlgorithm']


class FlowerPollinationAlgorithm(Algorithm):
	r"""Implementation of Flower Pollination algorithm.

	Algorithm:
		Flower Pollination algorithm

	Date:
		2018

	Authors:
		Dusan Fister, Iztok Fister Jr. and Klemen Berkovič

	License:
		MIT

	Reference paper:
		Yang, Xin-She. "Flower pollination algorithm for global optimization."
		International conference on unconventional computing and natural computation.
		Springer, Berlin, Heidelberg, 2012.

	References URL:
		Implementation is based on the following MATLAB code:
		https://www.mathworks.com/matlabcentral/fileexchange/45112-flower-pollination-algorithm?requestedDomain=true

	Attributes:
		Name (List[str]): List of strings representing algorithm names.
		p (float): Probability switch.
		beta (float): Shape of the gamma distribution (should be greater than zero).

	See Also:
		* :class:`WeOptPy.algorithms.Algorithm`
	"""
	Name = ['FlowerPollinationAlgorithm', 'FPA']

	@staticmethod
	def type_parameters():
		r"""Get callables for checking parameter values.

		Returns:
			Dict[str, Callable]:
				* p (function): Check that the probability switch is a float in [0, 1].
				* beta (function): Check that the gamma shape is a positive number.

		See Also:
			* :func:`WeOptPy.algorithms.Algorithm.typeParameters`
		"""
		d = Algorithm.type_parameters()
		d.update({
			'p': lambda x: isinstance(x, float) and 0 <= x <= 1,
			'beta': lambda x: isinstance(x, (float, int)) and x > 0,
		})
		return d

	def set_parameters(self, n=25, p=0.35, beta=1.5, **ukwargs):
		r"""Set core parameters of FlowerPollinationAlgorithm algorithm.

		Args:
			n (int): Population size.
			p (float): Probability switch.
			beta (float): Shape of the gamma distribution (should be greater than zero).

		See Also:
			* :func:`WeOptPy.algorithms.Algorithm.setParameters`
		"""
		Algorithm.set_parameters(self, n=n, **ukwargs)
		self.p, self.beta = p, beta
		# Placeholder buffer; re-allocated with the task dimension in init_population.
		self.S = np.zeros((n, 10))

	def levy(self, D):
		r"""Draw a Levy flight step using Mantegna's algorithm.

		Args:
			D (int): Number of dimensions.

		Returns:
			numpy.ndarray: Array of D Levy-distributed step components.
		"""
		sigma = (Gamma(1 + self.beta) * np.sin(np.pi * self.beta / 2) / (Gamma((1 + self.beta) / 2) * self.beta * 2 ** ((self.beta - 1) / 2))) ** (1 / self.beta)
		return 0.01 * (self.normal(0, 1, D) * sigma / np.fabs(self.normal(0, 1, D)) ** (1 / self.beta))

	def init_population(self, task):
		r"""Initialize the initial population.

		Args:
			task (Task): Optimization task.

		Returns:
			Tuple[numpy.ndarray, numpy.ndarray, list, dict]:
				1. Initial population.
				2. Initial population fitness/utility function values.
				3. Additional arguments.
				4. Additional keyword arguments.
		"""
		pop, fpop, args, d = Algorithm.init_population(self, task)
		d.update({'S': np.zeros((self.NP, task.D))})
		return pop, fpop, args, d

	def run_iteration(self, task, Sol, Sol_f, xb, fxb, S, *args, **dparams):
		r"""Core function of FlowerPollinationAlgorithm algorithm.

		Args:
			task (Task): Optimization task.
			Sol (numpy.ndarray): Current population.
			Sol_f (numpy.ndarray): Current population fitness/function values.
			xb (numpy.ndarray): Global best solution.
			fxb (float): Global best solution function/fitness value.
			args (list): Additional arguments.
			dparams (dict): Additional keyword arguments.

		Returns:
			Tuple[numpy.ndarray, numpy.ndarray, numpy.ndarray, float, list, dict]:
				1. New population.
				2. New population fitness/function values.
				3. New global best solution.
				4. New global best solution fitness/objective value.
				5. Additional arguments.
				6. Additional keyword arguments.
""" for i in range(self.NP): if self.uniform(0, 1) > self.p: S[i] += self.levy(task.D) * (Sol[i] - xb) else: JK = self.Rand.permutation(self.NP) S[i] += self.uniform(0, 1) * (Sol[JK[0]] - Sol[JK[1]]) S[i] = reflect_repair(S[i], task.Lower, task.Upper) f_i = task.eval(S[i]) if f_i <= Sol_f[i]: Sol[i], Sol_f[i] = S[i], f_i if f_i <= fxb: xb, fxb = S[i].copy(), f_i return Sol, Sol_f, xb, fxb, args, {'S': S} # vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
# Licensed under a 3-clause BSD style license - see LICENSE.rst """Support VO Simple Cone Search capabilities.""" from __future__ import absolute_import, division, print_function, unicode_literals # STDLIB import warnings # THIRD-PARTY import numpy as np # LOCAL from . import vos_catalog from .async import AsyncBase from .exceptions import ConeSearchError, VOSError from ... import units as u from ...config.configuration import ConfigAlias from ...coordinates import ICRS, BaseCoordinateFrame, Longitude, Latitude, SkyCoord from ...units import Quantity from ...utils.timer import timefunc, RunTimePredictor from ...utils.exceptions import AstropyUserWarning from ...utils import data __all__ = ['AsyncConeSearch', 'conesearch', 'AsyncSearchAll', 'search_all', 'list_catalogs', 'predict_search', 'conesearch_timer'] # Skip these doctests for now; # TODO: Add the ability to add py.test markers (such as remote_data) to # doctests __doctest_skip__ = ['AsyncConeSearch', 'AsyncSearchAll'] CONESEARCH_DBNAME = ConfigAlias( '0.4', 'CONESEARCH_DBNAME', 'conesearch_dbname', 'astropy.vo.client.conesearch', 'astropy.vo') class AsyncConeSearch(AsyncBase): """Perform a Cone Search asynchronously and returns the result of the first successful query. .. note:: See `~astropy.vo.client.async.AsyncBase` for more details. Parameters ---------- args, kwargs : see :func:`conesearch` Examples -------- >>> from astropy import coordinates as coord >>> from astropy import units as u >>> c = coord.ICRS(6.0223 * u.degree, -72.0814 * u.degree) >>> async_search = conesearch.AsyncConeSearch( ... c, 0.5 * u.degree, ... catalog_db='The PMM USNO-A1.0 Catalogue (Monet 1997) 1') Check search status: >>> async_search.running() True >>> async_search.done() False Get search results after a 30-second wait (not to be confused with `astropy.utils.data.Conf.remote_timeout` that governs individual Cone Search queries). If search is still not done after 30 seconds, `TimeoutError` is raised. Otherwise, Cone Search result is returned and can be manipulated as in :ref:`Simple Cone Search Examples <vo-sec-scs-examples>`. If no ``timeout`` keyword given, it waits until completion: >>> async_result = async_search.get(timeout=30) >>> cone_arr = async_result.array.data >>> cone_arr.size 36184 """ def __init__(self, *args, **kwargs): super(AsyncConeSearch, self).__init__(conesearch, *args, **kwargs) def conesearch(center, radius, verb=1, **kwargs): """Perform Cone Search and returns the result of the first successful query. Parameters ---------- center : `~astropy.coordinates.SkyCoord`, `~astropy.coordinates.BaseCoordinateFrame`, or sequence of length 2 Position of the center of the cone to search. It may be specified as an object from the :ref:`astropy-coordinates` package, or as a length 2 sequence. If a sequence, it is assumed to be ``(RA, DEC)`` in the ICRS coordinate frame, given in decimal degrees. radius : float or `~astropy.units.quantity.Quantity` Radius of the cone to search: - If float is given, it is assumed to be in decimal degrees. - If astropy quantity is given, it is internally converted to degrees. verb : {1, 2, 3} Verbosity indicating how many columns are to be returned in the resulting table. Support for this parameter by a Cone Search service implementation is optional. If the service supports the parameter: 1. Return the bare minimum number of columns that the provider considers useful in describing the returned objects. 2. 
Return a medium number of columns between the minimum and
           maximum (inclusive) that are considered by the provider
           to most typically be useful to the user.

        3. Return all of the columns that are available for
           describing the objects.

        If not supported, the service should ignore the parameter
        and always return the same columns for every request.

    catalog_db
        May be one of the following, in order from easiest to
        use to most control:

            - `None`: A database of
              `astropy.vo.Conf.conesearch_dbname` catalogs is
              downloaded from `astropy.vo.Conf.vos_baseurl`.  The
              first catalog in the database to successfully return a
              result is used.

            - *catalog name*: A name in the database of
              `astropy.vo.Conf.conesearch_dbname` catalogs at
              `astropy.vo.Conf.vos_baseurl` is used.  For a list of
              acceptable names, use :func:`list_catalogs`.

            - *url*: The prefix of a URL to an IVOA Service for
              `astropy.vo.Conf.conesearch_dbname`.  Must end in either
              '?' or '&'.

            - `~astropy.vo.client.vos_catalog.VOSCatalog` object: A
              specific catalog manually downloaded and selected from
              the database (see :ref:`vo-sec-client-vos`).

            - Any of the above 3 options combined in a list, in which
              case they are tried in order.

    pedantic : bool or `None`
        When `True`, raise an error when the file violates the spec,
        otherwise issue a warning.  Warnings may be controlled using
        :py:mod:`warnings` module.  When not provided, uses the
        configuration setting `astropy.io.votable.Conf.pedantic`,
        which defaults to `False`.

    verbose : bool
        Verbose output.

    cache : bool
        Use caching for VO Service database. Access to actual VO
        websites referenced by the database still needs internet
        connection.

    Returns
    -------
    obj : `astropy.io.votable.tree.Table`
        First table from first successful VO service request.

    Raises
    ------
    ConeSearchError
        When invalid inputs are passed into Cone Search.

    VOSError
        If VO service request fails.

    """
    from .. import conf

    # Validate RA and DEC
    ra, dec = _validate_coord(center)

    # Validate search radius
    sr = _validate_sr(radius)

    # Validate verbosity
    verb = _local_conversion(int, verb)
    if verb not in (1, 2, 3):  # pragma: no cover
        raise ConeSearchError('Verbosity must be 1, 2, or 3')

    args = {'RA': ra, 'DEC': dec, 'SR': sr, 'VERB': verb}

    return vos_catalog.call_vo_service(conf.conesearch_dbname,
                                       kwargs=args, **kwargs)


class AsyncSearchAll(AsyncBase):
    """Perform a Cone Search asynchronously, storing all results
    instead of just the result from the first successful query.

    .. note::

        See `~astropy.vo.client.async.AsyncBase` for more details.

    Parameters
    ----------
    args, kwargs : see :func:`search_all`

    Examples
    --------
    >>> from astropy import coordinates as coord
    >>> from astropy import units as u
    >>> c = coord.ICRS(6.0223 * u.degree, -72.0814 * u.degree)
    >>> async_search = conesearch.AsyncSearchAll(c, 0.5 * u.degree)

    Check search status:

    >>> async_search.running()
    True
    >>> async_search.done()
    False

    Get a dictionary of all search results after a 30-second wait (not
    to be confused with `astropy.utils.data.Conf.remote_timeout` that
    governs individual Cone Search queries).  If search is still not
    done after 30 seconds, `TimeoutError` is raised.  Otherwise, a
    dictionary is returned and can be manipulated as in
    :ref:`Simple Cone Search Examples <vo-sec-scs-examples>`.
If no ``timeout`` keyword given, it waits until completion: >>> async_allresults = async_search.get(timeout=30) >>> all_catalogs = list(async_allresults) >>> first_cone_arr = async_allresults[all_catalogs[0]].array.data >>> first_cone_arr.size 36184 """ def __init__(self, *args, **kwargs): AsyncBase.__init__(self, search_all, *args, **kwargs) def search_all(*args, **kwargs): """Perform Cone Search and returns the results of all successful queries. .. warning:: Could potentially take up significant run time and computing resources. Parameters ---------- args, kwargs : Arguments and keywords accepted by :func:`conesearch`. Returns ------- all_results : dict of `astropy.io.votable.tree.Table` objects A dictionary of tables from successful VO service requests, with keys being the access URLs. If none is successful, an empty dictionary is returned. Raises ------ ConeSearchError When invalid inputs are passed into Cone Search. """ from .. import conf all_results = {} catalog_db = kwargs.get('catalog_db', None) if 'catalog_db' in kwargs: kwargs.pop('catalog_db') cache = kwargs.get('cache', True) verbose = kwargs.get('verbose', True) catalogs = vos_catalog._get_catalogs(conf.conesearch_dbname, catalog_db, cache=cache, verbose=verbose) for name, catalog in catalogs: try: result = conesearch(catalog_db=catalog, *args, **kwargs) except VOSError: pass else: all_results[result.url] = result return all_results def list_catalogs(**kwargs): """Return the available Cone Search catalogs as a list of strings. These can be used for the ``catalog_db`` argument to :func:`conesearch`. Parameters ---------- cache : bool Use caching for VO Service database. Access to actual VO websites referenced by the database still needs internet connection. verbose : bool Show download progress bars. pattern : str or `None` If given string is anywhere in a catalog name, it is considered a matching catalog. It accepts patterns as in :py:mod:`fnmatch` and is case-insensitive. By default, all catalogs are returned. sort : bool Sort output in alphabetical order. If not sorted, the order depends on dictionary hashing. Default is `True`. Returns ------- arr : list of str List of catalog names. """ from .. import conf return vos_catalog.list_catalogs(conf.conesearch_dbname, **kwargs) def predict_search(url, *args, **kwargs): """Predict the run time needed and the number of objects for a Cone Search for the given access URL, position, and radius. Run time prediction uses `astropy.utils.timer.RunTimePredictor`. Baseline searches are done with starting and ending radii at 0.05 and 0.5 of the given radius, respectively. Extrapolation on good data uses least-square straight line fitting, assuming linear increase of search time and number of objects with radius, which might not be accurate for some cases. If there are less than 3 data points in the fit, it fails. Warnings (controlled by :py:mod:`warnings`) are given when: #. Fitted slope is negative. #. Any of the estimated results is negative. #. Estimated run time exceeds `astropy.utils.data.Conf.remote_timeout`. .. note:: If ``verbose=True``, extra log info will be provided. But unlike :func:`conesearch_timer`, timer info is suppressed. If ``plot=True``, plot will be displayed. Plotting uses `matplotlib <http://matplotlib.sourceforge.net/>`_. The predicted results are just *rough* estimates. Prediction is done using :func:`conesearch`. Prediction for `AsyncConeSearch` is not supported. Parameters ---------- url : str Cone Search access URL to use. 
args, kwargs : see :func:`conesearch` Extra keyword ``plot`` is allowed and only used by this function and not :func:`conesearch`. Returns ------- t_est : float Estimated time in seconds needed for the search. n_est : int Estimated number of objects the search will yield. Raises ------ AssertionError If prediction fails. ConeSearchError If input parameters are invalid. VOSError If VO service request fails. """ if len(args) != 2: # pragma: no cover raise ConeSearchError('conesearch must have exactly 2 arguments') plot = kwargs.get('plot', False) if 'plot' in kwargs: # pragma: no cover del kwargs['plot'] center, radius = args sr = _validate_sr(radius) if sr <= 0: raise ConeSearchError('Search radius must be > 0 degrees') kwargs['catalog_db'] = url cs_pred = RunTimePredictor(conesearch, center, **kwargs) # Search properties for timer extrapolation num_datapoints = 10 # Number of desired data points for extrapolation sr_min = 0.05 * sr # Min radius to start the timer sr_max = 0.5 * sr # Max radius to stop the timer sr_step = (1.0 / num_datapoints) * (sr_max - sr_min) # Radius step # Slowly increase radius to get data points for extrapolation sr_arr = np.arange(sr_min, sr_max + sr_step, sr_step) cs_pred.time_func(sr_arr) # Predict run time t_coeffs = cs_pred.do_fit() t_est = cs_pred.predict_time(sr) if t_est < 0 or t_coeffs[1] < 0: # pragma: no cover warnings.warn('Estimated runtime ({0} s) is non-physical with slope of ' '{1}'.format(t_est, t_coeffs[1]), AstropyUserWarning) elif t_est > data.conf.remote_timeout: # pragma: no cover warnings.warn('Estimated runtime is longer than timeout of ' '{0} s'.format(data.conf.remote_timeout), AstropyUserWarning) # Predict number of objects sr_arr = sorted(cs_pred.results) # Orig with floating point error n_arr = [cs_pred.results[key].array.size for key in sr_arr] n_coeffs = np.polyfit(sr_arr, n_arr, 1) n_fitfunc = np.poly1d(n_coeffs) n_est = int(round(n_fitfunc(sr))) if n_est < 0 or n_coeffs[0] < 0: # pragma: no cover warnings.warn('Estimated #objects ({0}) is non-physical with slope of ' '{1}'.format(n_est, n_coeffs[0]), AstropyUserWarning) if plot: # pragma: no cover import matplotlib.pyplot as plt xlabeltext = 'radius (deg)' sr_fit = np.append(sr_arr, sr) n_fit = n_fitfunc(sr_fit) cs_pred.plot(xlabeltext=xlabeltext) fig, ax = plt.subplots() ax.plot(sr_arr, n_arr, 'kx-', label='Actual') ax.plot(sr_fit, n_fit, 'b--', label='Fit') ax.scatter([sr], [n_est], marker='o', c='r', label='Predicted') ax.set_xlabel(xlabeltext) ax.set_ylabel('#objects') ax.legend(loc='best', numpoints=1) plt.draw() return t_est, n_est @timefunc(1) def conesearch_timer(*args, **kwargs): """Time a single Cone Search using `astropy.utils.timer.timefunc` with a single try and a verbose timer. Parameters ---------- args, kwargs : see :func:`conesearch` Returns ------- t : float Run time in seconds. obj : `astropy.io.votable.tree.Table` First table from first successful VO service request. 
""" return conesearch(*args, **kwargs) def _local_conversion(func, x): """Try ``func(x)`` and replace `~.exceptions.ValueError` with ``ConeSearchError``.""" try: y = func(x) except ValueError as e: # pragma: no cover raise ConeSearchError(str(e)) else: return y def _validate_coord(center): """Validate coordinates.""" if isinstance(center, SkyCoord): icrscoord = center.transform_to(ICRS).frame elif isinstance(center, BaseCoordinateFrame): icrscoord = center.transform_to(ICRS) else: icrscoord = ICRS(Longitude(center[0], unit=u.degree), Latitude(center[1], unit=u.degree)) return icrscoord.ra.degree, icrscoord.dec.degree def _validate_sr(radius): """Validate search radius.""" if isinstance(radius, Quantity): sr_angle = radius.to(u.degree) else: sr_angle = radius * u.degree return sr_angle.value
# Copyright (c) 2020 ING Bank N.V. # # Permission is hereby granted, free of charge, to any person obtaining a copy of # this software and associated documentation files (the "Software"), to deal in # the Software without restriction, including without limitation the rights to # use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of # the Software, and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS # FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR # COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER # IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. from collections import defaultdict import numpy as np from tqdm import tqdm import warnings from .warnings import NotIntendedUseWarning class TreePathFinder: """ Class to calculate the boundaries of a decision tree. It retrieves the structure from the decision tree """ def __init__(self, estimator): self.estimator = estimator self.n_nodes = estimator.tree_.node_count self.children_left = estimator.tree_.children_left self.children_right = estimator.tree_.children_right self.feature = estimator.tree_.feature self.threshold = estimator.tree_.threshold self.is_leaves = self._find_leaves() self.decision_path = self.find_decision_to_leaves() self.bin_boundaries = self.find_bin_boundaries() def _find_leaves(self): # The tree structure can be traversed to compute various properties such # as the depth of each node and whether or not it is a leaf. 
        n_nodes = self.n_nodes
        children_left = self.children_left
        children_right = self.children_right

        node_depth = np.zeros(shape=n_nodes, dtype=np.int64)
        is_leaves = np.zeros(shape=n_nodes, dtype=bool)
        stack = [(0, -1)]  # seed is the root node id and its parent depth
        while len(stack) > 0:
            node_id, parent_depth = stack.pop()
            node_depth[node_id] = parent_depth + 1

            # If we have a test node
            if children_left[node_id] != children_right[node_id]:
                stack.append((children_left[node_id], parent_depth + 1))
                stack.append((children_right[node_id], parent_depth + 1))
            else:
                is_leaves[node_id] = True

        return is_leaves

    def find_parent(self, leaf_id):
        in_left = np.where(self.children_left == leaf_id)[0]
        in_right = np.where(self.children_right == leaf_id)[0]

        is_inleft = len(in_left) > 0
        is_inright = len(in_right) > 0

        if is_inleft and is_inright:
            # A node can be the left or the right child of its parent, never
            # both: reaching this branch means the tree arrays are inconsistent.
            raise ValueError(
                f"node with id {leaf_id} appears as both a left and a right child"
            )
        elif is_inleft:
            parent = in_left[0]
            operator = "<="
        elif is_inright:
            parent = in_right[0]
            operator = ">"
        else:
            # Not a child of any node: this is the root.
            parent = 0
            operator = "None"

        threshold = self.threshold[parent]
        feature = self.feature[parent]

        return parent, threshold, operator, feature

    def find_decision_to_leaves(self):
        leaves_ids = np.where(self.is_leaves)[0]

        decision_path = defaultdict(list)
        for leaf_id in tqdm(leaves_ids):
            parent_id = -1
            node_id = leaf_id
            while parent_id != 0:
                path_step = self.find_parent(node_id)
                parent_id = path_step[0]
                decision_path[leaf_id].append(path_step)
                node_id = parent_id

        return decision_path

    def find_bin_boundaries(self):
        out_dict = dict()

        for leaf_id in self.decision_path.keys():
            one_leaf_decisions = self.decision_path[leaf_id]

            if "<=" not in [oper[2] for oper in one_leaf_decisions]:
                max_val = np.inf
            else:
                max_val = min(
                    [oper[1] for oper in one_leaf_decisions if oper[2] == "<="]
                )

            if ">" not in [oper[2] for oper in one_leaf_decisions]:
                min_val = -np.inf
            else:
                min_val = max(
                    [oper[1] for oper in one_leaf_decisions if oper[2] == ">"]
                )

            out_dict[leaf_id] = {
                "min": min_val,
                "max": max_val,
            }

        return out_dict

    def get_boundaries(self):
        # Check how many features there are. There is always a unique negative
        # value in the array of features that corresponds to the leaf nodes.
        # Hence the total number of features in the tree is the length of the
        # array - 1.
        n_features = len(np.unique(self.feature)) - 1
        if n_features > 1:
            warning = (
                f"This functionality is intended for trees fitted on 1 feature. The current tree is fitted "
                f"with {n_features} features"
            )
            warnings.warn(NotIntendedUseWarning(warning))

        return self.bin_boundaries
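# ----------------------------------------------------------------------------
# Hedged usage sketch: TreePathFinder expects a fitted sklearn decision tree,
# ideally trained on a single feature (see get_boundaries). The synthetic data
# and the threshold 5.0 below are made up purely to show the call pattern.
if __name__ == "__main__":
    from sklearn.tree import DecisionTreeClassifier

    X = np.random.RandomState(0).uniform(0, 10, size=(200, 1))
    y = (X[:, 0] > 5).astype(int)
    clf = DecisionTreeClassifier(max_depth=2, random_state=0).fit(X, y)

    tpf = TreePathFinder(clf)
    for leaf_id, bounds in tpf.get_boundaries().items():
        # Each leaf owns an interval such as (-inf, 5.0] or (5.0, inf).
        print(leaf_id, bounds["min"], bounds["max"])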
from __future__ import print_function, unicode_literals, absolute_import

import codecs
import contextlib
import itertools
import os
import re
import sys

try:
    from StringIO import StringIO
except ImportError:
    from io import StringIO


# Check if a generator has at least one element.
#
# Since we don't want to consume the element, the function returns a tuple.
# The first element is a boolean telling whether or not the generator is empty.
# The second element is a new generator where the first element has been
# put back.
def empty_iterator_wrap(iterator):
    try:
        first = next(iterator)
    except StopIteration:
        return True, None
    return False, itertools.chain([first], iterator)


# compatibility function,
# not as smart as the version of the Python standard library
@contextlib.contextmanager
def suppress(*exceptions):
    """Context manager to suppress specified exceptions

    with suppress(OSError):
        os.remove(somefile)

    """
    try:
        yield
    except exceptions:
        pass


def re_fullmatch(regex, string, flags=0):
    """Emulate python-3.4 re.fullmatch()."""
    return re.match("(?:" + regex + r")\Z", string, flags=flags)


# The issue this function tries to solve is to have a text writer where unicode
# data can be written without decoding error. It should work in the following
# conditions:
# - python 2 & 3, output to terminal
# - python 2 & 3, output to a pipe or shell redirection
# - python 2 & 3, output to a StringIO
#
# When using python 2, if the program output is redirected to a pipe or file,
# the output encoding may be set to 'ascii',
# potentially producing UnicodeEncodeError.
# Redirections do not seem to cause such issue with python 3
# but explicit utf-8 encoding seems a sensible choice to output data to be
# consumed by other programs (e.g: JSON).
def stdout_unicode_writer():
    stream = sys.stdout
    if isinstance(stream, StringIO):
        return stream
    if hasattr(stream, 'buffer'):
        stream = stream.buffer
    return codecs.getwriter('utf-8')(stream)


def get_friendly_path(path):
    full_path = os.path.normpath(path)
    try:
        rel_path = os.path.relpath(full_path)
    except ValueError:
        # on Windows, we can get a ValueError
        # if the current directory is on another drive:
        # > ValueError: path is on drive D:, start on drive C:
        # > -- https://github.com/Sarcasm/compdb/issues/16
        return full_path
    if rel_path.startswith(os.path.join(os.pardir, os.pardir)):
        friendly_path = full_path
    else:
        friendly_path = rel_path
    return friendly_path


def logical_abspath(p):
    """Same as os.path.abspath,
    but use the logical current working directory to expand relative paths.
    """
    if os.path.isabs(p):
        return os.path.normpath(p)
    cwd = os.getenv('PWD')
    if cwd and os.path.isabs(cwd) and os.path.samefile(cwd, '.'):
        return os.path.normpath(os.path.join(cwd, p))
    return os.path.abspath(p)


def locate_dominating_file(name, start_dir=os.curdir):
    curdir = os.path.abspath(start_dir)
    olddir = None
    while not curdir == olddir:
        if os.path.exists(os.path.join(curdir, name)):
            return curdir
        olddir = curdir
        curdir = os.path.dirname(curdir)
    return None
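# ----------------------------------------------------------------------------
# Small illustrative self-checks for the helpers above: empty_iterator_wrap
# must not lose the element it peeks at, and re_fullmatch must anchor the
# whole string, unlike a bare re.match.
if __name__ == '__main__':
    empty, it = empty_iterator_wrap(iter([1, 2, 3]))
    assert not empty
    assert list(it) == [1, 2, 3]  # the peeked element 1 is still there

    empty, it = empty_iterator_wrap(iter([]))
    assert empty and it is None

    assert re_fullmatch(r'\d+', '123') is not None
    assert re_fullmatch(r'\d+', '123x') is None  # re.match alone would accept this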
# -*- coding: utf-8 -*- # PLEASE DO NOT EDIT THIS FILE, IT IS GENERATED AND WILL BE OVERWRITTEN: # https://github.com/ccxt/ccxt/blob/master/CONTRIBUTING.md#how-to-contribute-code from ccxt.async_support.base.exchange import Exchange import math from ccxt.base.errors import ExchangeError from ccxt.base.errors import AuthenticationError from ccxt.base.errors import ArgumentsRequired from ccxt.base.errors import InsufficientFunds from ccxt.base.errors import InvalidOrder from ccxt.base.errors import OrderNotFound class coinex(Exchange): def describe(self): return self.deep_extend(super(coinex, self).describe(), { 'id': 'coinex', 'name': 'CoinEx', 'version': 'v1', 'countries': ['CN'], 'rateLimit': 1000, 'has': { 'fetchTickers': True, 'fetchOHLCV': True, 'fetchOrder': True, 'fetchOpenOrders': True, 'fetchClosedOrders': True, 'fetchMyTrades': True, 'withdraw': True, 'fetchDeposits': True, 'fetchWithdrawals': True, }, 'timeframes': { '1m': '1min', '3m': '3min', '5m': '5min', '15m': '15min', '30m': '30min', '1h': '1hour', '2h': '2hour', '4h': '4hour', '6h': '6hour', '12h': '12hour', '1d': '1day', '3d': '3day', '1w': '1week', }, 'urls': { 'logo': 'https://user-images.githubusercontent.com/1294454/38046312-0b450aac-32c8-11e8-99ab-bc6b136b6cc7.jpg', 'api': 'https://api.coinex.com', 'www': 'https://www.coinex.com', 'doc': 'https://github.com/coinexcom/coinex_exchange_api/wiki', 'fees': 'https://www.coinex.com/fees', 'referral': 'https://www.coinex.com/register?refer_code=yw5fz', }, 'api': { 'public': { 'get': [ 'common/currency/rate', 'common/asset/config', 'market/info', 'market/list', 'market/ticker', 'market/ticker/all', 'market/depth', 'market/deals', 'market/kline', ], }, 'private': { 'get': [ 'balance/coin/deposit', 'balance/coin/withdraw', 'balance/info', 'future/account', 'future/config', 'future/limitprice', 'future/loan/history', 'future/market', 'margin/account', 'margin/config', 'margin/loan/history', 'margin/market', 'order', 'order/deals', 'order/finished', 'order/finished/{id}', 'order/pending', 'order/status', 'order/status/batch', 'order/user/deals', ], 'post': [ 'balance/coin/withdraw', 'future/flat', 'future/loan', 'future/transfer', 'margin/flat', 'margin/loan', 'margin/transfer', 'order/batchlimit', 'order/ioc', 'order/limit', 'order/market', 'sub_account/transfer', ], 'delete': [ 'balance/coin/withdraw', 'order/pending/batch', 'order/pending', ], }, }, 'fees': { 'trading': { 'maker': 0.001, 'taker': 0.001, }, 'funding': { 'withdraw': { 'BCH': 0.0, 'BTC': 0.001, 'LTC': 0.001, 'ETH': 0.001, 'ZEC': 0.0001, 'DASH': 0.0001, }, }, }, 'limits': { 'amount': { 'min': 0.001, 'max': None, }, }, 'precision': { 'amount': 8, 'price': 8, }, 'options': { 'createMarketBuyOrderRequiresPrice': True, }, }) async def fetch_markets(self, params={}): response = await self.publicGetMarketInfo(params) # # { # "code": 0, # "data": { # "WAVESBTC": { # "name": "WAVESBTC", # "min_amount": "1", # "maker_fee_rate": "0.001", # "taker_fee_rate": "0.001", # "pricing_name": "BTC", # "pricing_decimal": 8, # "trading_name": "WAVES", # "trading_decimal": 8 # } # } # } # markets = self.safe_value(response, 'data', {}) result = [] keys = list(markets.keys()) for i in range(0, len(keys)): key = keys[i] market = markets[key] id = self.safe_string(market, 'name') tradingName = self.safe_string(market, 'trading_name') baseId = tradingName quoteId = self.safe_string(market, 'pricing_name') base = self.safe_currency_code(baseId) quote = self.safe_currency_code(quoteId) symbol = base + '/' + quote if tradingName == id: symbol 
= id precision = { 'amount': self.safe_integer(market, 'trading_decimal'), 'price': self.safe_integer(market, 'pricing_decimal'), } active = None result.append({ 'id': id, 'symbol': symbol, 'base': base, 'quote': quote, 'baseId': baseId, 'quoteId': quoteId, 'active': active, 'taker': self.safe_float(market, 'taker_fee_rate'), 'maker': self.safe_float(market, 'maker_fee_rate'), 'info': market, 'precision': precision, 'limits': { 'amount': { 'min': self.safe_float(market, 'min_amount'), 'max': None, }, 'price': { 'min': math.pow(10, -precision['price']), 'max': None, }, }, }) return result def parse_ticker(self, ticker, market=None): timestamp = self.safe_integer(ticker, 'date') symbol = None if market is not None: symbol = market['symbol'] ticker = self.safe_value(ticker, 'ticker', {}) last = self.safe_float(ticker, 'last') return { 'symbol': symbol, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'high': self.safe_float(ticker, 'high'), 'low': self.safe_float(ticker, 'low'), 'bid': self.safe_float(ticker, 'buy'), 'bidVolume': None, 'ask': self.safe_float(ticker, 'sell'), 'askVolume': None, 'vwap': None, 'open': None, 'close': last, 'last': last, 'previousClose': None, 'change': None, 'percentage': None, 'average': None, 'baseVolume': self.safe_float_2(ticker, 'vol', 'volume'), 'quoteVolume': None, 'info': ticker, } async def fetch_ticker(self, symbol, params={}): await self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } response = await self.publicGetMarketTicker(self.extend(request, params)) return self.parse_ticker(response['data'], market) async def fetch_tickers(self, symbols=None, params={}): await self.load_markets() response = await self.publicGetMarketTickerAll(params) data = self.safe_value(response, 'data') timestamp = self.safe_integer(data, 'date') tickers = self.safe_value(data, 'ticker') marketIds = list(tickers.keys()) result = {} for i in range(0, len(marketIds)): marketId = marketIds[i] symbol = marketId market = None if marketId in self.markets_by_id: market = self.markets_by_id[marketId] symbol = market['symbol'] ticker = self.parse_ticker({ 'date': timestamp, 'ticker': tickers[marketId], }, market) ticker['symbol'] = symbol result[symbol] = ticker return result async def fetch_order_book(self, symbol, limit=20, params={}): await self.load_markets() if limit is None: limit = 20 # default request = { 'market': self.market_id(symbol), 'merge': '0.0000000001', 'limit': str(limit), } response = await self.publicGetMarketDepth(self.extend(request, params)) return self.parse_order_book(response['data']) def parse_trade(self, trade, market=None): # self method parses both public and private trades timestamp = self.safe_timestamp(trade, 'create_time') if timestamp is None: timestamp = self.safe_integer(trade, 'date_ms') tradeId = self.safe_string(trade, 'id') orderId = self.safe_string(trade, 'order_id') price = self.safe_float(trade, 'price') amount = self.safe_float(trade, 'amount') marketId = self.safe_string(trade, 'market') market = self.safe_value(self.markets_by_id, marketId, market) symbol = None if market is not None: symbol = market['symbol'] cost = self.safe_float(trade, 'deal_money') if not cost: cost = float(self.cost_to_precision(symbol, price * amount)) fee = None feeCost = self.safe_float(trade, 'fee') if feeCost is not None: feeCurrencyId = self.safe_string(trade, 'fee_asset') feeCurrencyCode = self.safe_currency_code(feeCurrencyId) fee = { 'cost': feeCost, 'currency': feeCurrencyCode, } takerOrMaker = 
self.safe_string(trade, 'role') side = self.safe_string(trade, 'type') return { 'info': trade, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'symbol': symbol, 'id': tradeId, 'order': orderId, 'type': None, 'side': side, 'takerOrMaker': takerOrMaker, 'price': price, 'amount': amount, 'cost': cost, 'fee': fee, } async def fetch_trades(self, symbol, since=None, limit=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'market': market['id'], } response = await self.publicGetMarketDeals(self.extend(request, params)) return self.parse_trades(response['data'], market, since, limit) def parse_ohlcv(self, ohlcv, market=None): # # [ # 1591484400, # "0.02505349", # "0.02506988", # "0.02507000", # "0.02505304", # "343.19716223", # "8.6021323866383196", # "ETHBTC" # ] # return [ self.safe_timestamp(ohlcv, 0), self.safe_float(ohlcv, 1), self.safe_float(ohlcv, 3), self.safe_float(ohlcv, 4), self.safe_float(ohlcv, 2), self.safe_float(ohlcv, 5), ] async def fetch_ohlcv(self, symbol, timeframe='5m', since=None, limit=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'market': market['id'], 'type': self.timeframes[timeframe], } response = await self.publicGetMarketKline(self.extend(request, params)) # # { # "code": 0, # "data": [ # [1591484400, "0.02505349", "0.02506988", "0.02507000", "0.02505304", "343.19716223", "8.6021323866383196", "ETHBTC"], # [1591484700, "0.02506990", "0.02508109", "0.02508109", "0.02506979", "91.59841581", "2.2972047780447000", "ETHBTC"], # [1591485000, "0.02508106", "0.02507996", "0.02508106", "0.02507500", "65.15307697", "1.6340597822306000", "ETHBTC"], # ], # "message": "OK" # } # data = self.safe_value(response, 'data', []) return self.parse_ohlcvs(data, market, timeframe, since, limit) async def fetch_balance(self, params={}): await self.load_markets() response = await self.privateGetBalanceInfo(params) # # { # "code": 0, # "data": { # "BCH": { # BCH account # "available": "13.60109", # Available BCH # "frozen": "0.00000" # Frozen BCH # }, # "BTC": { # BTC account # "available": "32590.16", # Available BTC # "frozen": "7000.00" # Frozen BTC # }, # "ETH": { # ETH account # "available": "5.06000", # Available ETH # "frozen": "0.00000" # Frozen ETH # } # }, # "message": "Ok" # } # result = {'info': response} balances = self.safe_value(response, 'data') currencyIds = list(balances.keys()) for i in range(0, len(currencyIds)): currencyId = currencyIds[i] code = self.safe_currency_code(currencyId) balance = self.safe_value(balances, currencyId, {}) account = self.account() account['free'] = self.safe_float(balance, 'available') account['used'] = self.safe_float(balance, 'frozen') result[code] = account return self.parse_balance(result) def parse_order_status(self, status): statuses = { 'not_deal': 'open', 'part_deal': 'open', 'done': 'closed', 'cancel': 'canceled', } return self.safe_string(statuses, status, status) def parse_order(self, order, market=None): # # fetchOrder # # { # "amount": "0.1", # "asset_fee": "0.22736197736197736197", # "avg_price": "196.85000000000000000000", # "create_time": 1537270135, # "deal_amount": "0.1", # "deal_fee": "0", # "deal_money": "19.685", # "fee_asset": "CET", # "fee_discount": "0.5", # "id": 1788259447, # "left": "0", # "maker_fee_rate": "0", # "market": "ETHUSDT", # "order_type": "limit", # "price": "170.00000000", # "status": "done", # "taker_fee_rate": "0.0005", # "type": "sell", # } # timestamp = self.safe_timestamp(order, 'create_time') price = 
self.safe_float(order, 'price') cost = self.safe_float(order, 'deal_money') amount = self.safe_float(order, 'amount') filled = self.safe_float(order, 'deal_amount') average = self.safe_float(order, 'avg_price') symbol = None marketId = self.safe_string(order, 'market') market = self.safe_value(self.markets_by_id, marketId) feeCurrencyId = self.safe_string(order, 'fee_asset') feeCurrency = self.safe_currency_code(feeCurrencyId) if market is not None: symbol = market['symbol'] if feeCurrency is None: feeCurrency = market['quote'] remaining = self.safe_float(order, 'left') status = self.parse_order_status(self.safe_string(order, 'status')) type = self.safe_string(order, 'order_type') side = self.safe_string(order, 'type') return { 'id': self.safe_string(order, 'id'), 'clientOrderId': None, 'datetime': self.iso8601(timestamp), 'timestamp': timestamp, 'lastTradeTimestamp': None, 'status': status, 'symbol': symbol, 'type': type, 'side': side, 'price': price, 'cost': cost, 'average': average, 'amount': amount, 'filled': filled, 'remaining': remaining, 'trades': None, 'fee': { 'currency': feeCurrency, 'cost': self.safe_float(order, 'deal_fee'), }, 'info': order, } async def create_order(self, symbol, type, side, amount, price=None, params={}): await self.load_markets() method = 'privatePostOrder' + self.capitalize(type) market = self.market(symbol) request = { 'market': market['id'], 'type': side, } amount = float(amount) # for market buy it requires the amount of quote currency to spend if (type == 'market') and (side == 'buy'): if self.options['createMarketBuyOrderRequiresPrice']: if price is None: raise InvalidOrder(self.id + " createOrder() requires the price argument with market buy orders to calculate total order cost(amount to spend), where cost = amount * price. 
Supply a price argument to createOrder() call if you want the cost to be calculated for you from price and amount, or, alternatively, add .options['createMarketBuyOrderRequiresPrice'] = False to supply the cost in the amount argument(the exchange-specific behaviour)") else: price = float(price) request['amount'] = self.cost_to_precision(symbol, amount * price) else: request['amount'] = self.cost_to_precision(symbol, amount) else: request['amount'] = self.amount_to_precision(symbol, amount) if (type == 'limit') or (type == 'ioc'): request['price'] = self.price_to_precision(symbol, price) response = await getattr(self, method)(self.extend(request, params)) order = self.parse_order(response['data'], market) id = order['id'] self.orders[id] = order return order async def cancel_order(self, id, symbol=None, params={}): await self.load_markets() market = self.market(symbol) request = { 'id': id, 'market': market['id'], } response = await self.privateDeleteOrderPending(self.extend(request, params)) return self.parse_order(response['data'], market) async def fetch_order(self, id, symbol=None, params={}): if symbol is None: raise ArgumentsRequired(self.id + ' fetchOrder requires a symbol argument') await self.load_markets() market = self.market(symbol) request = { 'id': id, 'market': market['id'], } response = await self.privateGetOrder(self.extend(request, params)) # # { # "code": 0, # "data": { # "amount": "0.1", # "asset_fee": "0.22736197736197736197", # "avg_price": "196.85000000000000000000", # "create_time": 1537270135, # "deal_amount": "0.1", # "deal_fee": "0", # "deal_money": "19.685", # "fee_asset": "CET", # "fee_discount": "0.5", # "id": 1788259447, # "left": "0", # "maker_fee_rate": "0", # "market": "ETHUSDT", # "order_type": "limit", # "price": "170.00000000", # "status": "done", # "taker_fee_rate": "0.0005", # "type": "sell", # }, # "message": "Ok" # } # return self.parse_order(response['data'], market) async def fetch_orders_by_status(self, status, symbol=None, since=None, limit=None, params={}): await self.load_markets() if limit is None: limit = 100 request = { 'page': 1, 'limit': limit, } market = None if symbol is not None: market = self.market(symbol) request['market'] = market['id'] method = 'privateGetOrder' + self.capitalize(status) response = await getattr(self, method)(self.extend(request, params)) return self.parse_orders(response['data']['data'], market, since, limit) async def fetch_open_orders(self, symbol=None, since=None, limit=None, params={}): return await self.fetch_orders_by_status('pending', symbol, since, limit, params) async def fetch_closed_orders(self, symbol=None, since=None, limit=None, params={}): return await self.fetch_orders_by_status('finished', symbol, since, limit, params) async def fetch_my_trades(self, symbol=None, since=None, limit=None, params={}): await self.load_markets() if limit is None: limit = 100 request = { 'page': 1, 'limit': limit, } market = None if symbol is not None: market = self.market(symbol) request['market'] = market['id'] response = await self.privateGetOrderUserDeals(self.extend(request, params)) return self.parse_trades(response['data']['data'], market, since, limit) async def withdraw(self, code, amount, address, tag=None, params={}): self.check_address(address) await self.load_markets() currency = self.currency(code) if tag: address = address + ':' + tag request = { 'coin_type': currency['id'], 'coin_address': address, # must be authorized, inter-user transfer by a registered mobile phone number or an email address is 
supported 'actual_amount': float(amount), # the actual amount without fees, https://www.coinex.com/fees 'transfer_method': 'onchain', # onchain, local } response = await self.privatePostBalanceCoinWithdraw(self.extend(request, params)) # # { # "code": 0, # "data": { # "actual_amount": "1.00000000", # "amount": "1.00000000", # "coin_address": "1KAv3pazbTk2JnQ5xTo6fpKK7p1it2RzD4", # "coin_type": "BCH", # "coin_withdraw_id": 206, # "confirmations": 0, # "create_time": 1524228297, # "status": "audit", # "tx_fee": "0", # "tx_id": "" # }, # "message": "Ok" # } # transaction = self.safe_value(response, 'data', {}) return self.parse_transaction(transaction, currency) def parse_transaction_status(self, status): statuses = { 'audit': 'pending', 'pass': 'pending', 'processing': 'pending', 'confirming': 'pending', 'not_pass': 'failed', 'cancel': 'canceled', 'finish': 'ok', 'fail': 'failed', } return self.safe_string(statuses, status, status) def parse_transaction(self, transaction, currency=None): # # fetchDeposits # # { # "actual_amount": "120.00000000", # "actual_amount_display": "120", # "add_explorer": "XXX", # "amount": "120.00000000", # "amount_display": "120", # "coin_address": "XXXXXXXX", # "coin_address_display": "XXXXXXXX", # "coin_deposit_id": 1866, # "coin_type": "USDT", # "confirmations": 0, # "create_time": 1539595701, # "explorer": "", # "remark": "", # "status": "finish", # "status_display": "finish", # "transfer_method": "local", # "tx_id": "", # "tx_id_display": "XXXXXXXXXX" # } # # fetchWithdrawals # # { # "actual_amount": "0.10000000", # "amount": "0.10000000", # "coin_address": "15sr1VdyXQ6sVLqeJUJ1uPzLpmQtgUeBSB", # "coin_type": "BCH", # "coin_withdraw_id": 203, # "confirmations": 11, # "create_time": 1515806440, # "status": "finish", # "tx_fee": "0", # "tx_id": "896371d0e23d64d1cac65a0b7c9e9093d835affb572fec89dd4547277fbdd2f6" # } # id = self.safe_string_2(transaction, 'coin_withdraw_id', 'coin_deposit_id') address = self.safe_string(transaction, 'coin_address') tag = self.safe_string(transaction, 'remark') # set but unused if tag is not None: if len(tag) < 1: tag = None txid = self.safe_value(transaction, 'tx_id') if txid is not None: if len(txid) < 1: txid = None currencyId = self.safe_string(transaction, 'coin_type') code = self.safe_currency_code(currencyId, currency) timestamp = self.safe_timestamp(transaction, 'create_time') type = 'withdraw' if ('coin_withdraw_id' in transaction) else 'deposit' status = self.parse_transaction_status(self.safe_string(transaction, 'status')) amount = self.safe_float(transaction, 'amount') feeCost = self.safe_float(transaction, 'tx_fee') if type == 'deposit': feeCost = 0 fee = { 'cost': feeCost, 'currency': code, } return { 'info': transaction, 'id': id, 'txid': txid, 'timestamp': timestamp, 'datetime': self.iso8601(timestamp), 'address': address, 'tag': tag, 'type': type, 'amount': amount, 'currency': code, 'status': status, 'updated': None, 'fee': fee, } async def fetch_withdrawals(self, code=None, since=None, limit=None, params={}): if code is None: raise ArgumentsRequired(self.id + ' fetchWithdrawals requires a currency code argument') await self.load_markets() currency = self.currency(code) request = { 'coin_type': currency['id'], } if limit is not None: request['Limit'] = limit response = await self.privateGetBalanceCoinWithdraw(self.extend(request, params)) # # { # "code": 0, # "data": [ # { # "actual_amount": "1.00000000", # "amount": "1.00000000", # "coin_address": "1KAv3pazbTk2JnQ5xTo6fpKK7p1it2RzD4", # "coin_type": "BCH", # 
"coin_withdraw_id": 206, # "confirmations": 0, # "create_time": 1524228297, # "status": "audit", # "tx_fee": "0", # "tx_id": "" # }, # { # "actual_amount": "0.10000000", # "amount": "0.10000000", # "coin_address": "15sr1VdyXQ6sVLqeJUJ1uPzLpmQtgUeBSB", # "coin_type": "BCH", # "coin_withdraw_id": 203, # "confirmations": 11, # "create_time": 1515806440, # "status": "finish", # "tx_fee": "0", # "tx_id": "896371d0e23d64d1cac65a0b7c9e9093d835affb572fec89dd4547277fbdd2f6" # }, # { # "actual_amount": "0.00100000", # "amount": "0.00100000", # "coin_address": "1GVVx5UBddLKrckTprNi4VhHSymeQ8tsLF", # "coin_type": "BCH", # "coin_withdraw_id": 27, # "confirmations": 0, # "create_time": 1513933541, # "status": "cancel", # "tx_fee": "0", # "tx_id": "" # } # ], # "message": "Ok" # } # return self.parse_transactions(response['data'], currency, since, limit) async def fetch_deposits(self, code=None, since=None, limit=None, params={}): if code is None: raise ArgumentsRequired(self.id + ' fetchDeposits requires a currency code argument') await self.load_markets() currency = self.currency(code) request = { 'coin_type': currency['id'], } if limit is not None: request['Limit'] = limit response = await self.privateGetBalanceCoinDeposit(self.extend(request, params)) # { # "code": 0, # "data": [ # { # "actual_amount": "4.65397682", # "actual_amount_display": "4.65397682", # "add_explorer": "https://etherscan.io/address/0x361XXXXXX", # "amount": "4.65397682", # "amount_display": "4.65397682", # "coin_address": "0x36dabcdXXXXXX", # "coin_address_display": "0x361X*****XXXXX", # "coin_deposit_id": 966191, # "coin_type": "ETH", # "confirmations": 30, # "create_time": 1531661445, # "explorer": "https://etherscan.io/tx/0x361XXXXXX", # "remark": "", # "status": "finish", # "status_display": "finish", # "transfer_method": "onchain", # "tx_id": "0x361XXXXXX", # "tx_id_display": "0x361XXXXXX" # } # ], # "message": "Ok" # } # return self.parse_transactions(response['data'], currency, since, limit) def nonce(self): return self.milliseconds() def sign(self, path, api='public', method='GET', params={}, headers=None, body=None): path = self.implode_params(path, params) url = self.urls['api'] + '/' + self.version + '/' + path query = self.omit(params, self.extract_params(path)) if api == 'public': if query: url += '?' + self.urlencode(query) else: self.check_required_credentials() nonce = self.nonce() query = self.extend({ 'access_id': self.apiKey, 'tonce': str(nonce), }, query) query = self.keysort(query) urlencoded = self.urlencode(query) signature = self.hash(self.encode(urlencoded + '&secret_key=' + self.secret)) headers = { 'Authorization': signature.upper(), 'Content-Type': 'application/json', } if (method == 'GET') or (method == 'DELETE'): url += '?' + urlencoded else: body = self.json(query) return {'url': url, 'method': method, 'body': body, 'headers': headers} async def request(self, path, api='public', method='GET', params={}, headers=None, body=None): response = await self.fetch2(path, api, method, params, headers, body) code = self.safe_string(response, 'code') data = self.safe_value(response, 'data') message = self.safe_string(response, 'message') if (code != '0') or (data is None) or ((message != 'Ok') and not data): responseCodes = { '24': AuthenticationError, '25': AuthenticationError, '107': InsufficientFunds, '600': OrderNotFound, '601': InvalidOrder, '602': InvalidOrder, '606': InvalidOrder, } ErrorClass = self.safe_value(responseCodes, code, ExchangeError) raise ErrorClass(response['message']) return response
import json
import datetime
import os

from google.cloud import tasks_v2
from google.cloud.bigquery.reservation_v1 import ReservationServiceClient
from google.cloud.bigquery.reservation_v1 import CapacityCommitment
from google.protobuf import timestamp_pb2

"""
env:
delete_url="https://us-central1-my-test-project.cloudfunctions.net/delete_slot_capacity"
delete_queue="projects/my-test-project/locations/us-central1/queues/commit-delete-queue"
admin_project_id="my-test-project"
max_slots = 1000

test payload
{
    "region":"US",
    "extra_slots":100,
    "minutes":5
}
"""


def add_capacity_request(request):
    request_json = request.get_json(silent=True)
    print("parsed json {}".format(request_json))
    admin_project_id = os.environ.get('admin_project_id', None)
    max_slots = int(os.environ.get('max_slots', 1000))
    queue = os.environ.get('delete_queue', None)
    url = os.environ.get('delete_url', None)
    print("retrieved environment variables")
    if not request_json:
        return "message body not properly formatted as json"
    if not (queue and url and admin_project_id):
        return "'admin_project_id', 'delete_queue', and 'delete_url' environment variables must be set"
    region = request_json['region']
    slots = int(request_json['extra_slots'])
    minutes = int(request_json['minutes'])
    commit = add_capacity(admin_project_id, region, slots, max_slots)
    if commit:
        resp = launch_delete_task(admin_project_id, region, url, queue, commit.name, minutes)
        print(resp)
        return "Created a commitment {}; scheduled removal with {}".format(commit, resp)
    # An HTTP Cloud Function must return a response body even when no
    # commitment was created.
    return "No capacity added; the max_slots cap leaves no headroom"


def add_capacity(admin_project_id, region, extra_slots, max_slots):
    client = ReservationServiceClient()
    parent_arg = "projects/{}/locations/{}".format(admin_project_id, region)
    slots_to_add = check_project_slots(client, parent_arg, extra_slots, max_slots)
    if slots_to_add <= 0:
        return None
    commit_config = CapacityCommitment(plan='FLEX', slot_count=slots_to_add)
    commit = client.create_capacity_commitment(parent=parent_arg,
                                               capacity_commitment=commit_config)
    return commit


def check_project_slots(client, parent_arg, extra_slots, max_slots):
    # Sum the slots already committed, then cap the request so the project
    # never exceeds max_slots in total.
    total = 0
    for commit in client.list_capacity_commitments(parent=parent_arg):
        total += commit.slot_count
    slot_cap = max_slots - total
    return min(extra_slots, slot_cap)


def launch_delete_task(admin_project_id, region, url, queue, commit_id, minutes):
    client = tasks_v2.CloudTasksClient()
    payload = {'commit_id': commit_id}
    payload_utf8 = json.dumps(payload).encode()
    d = datetime.datetime.utcnow() + datetime.timedelta(minutes=minutes)
    timestamp = timestamp_pb2.Timestamp()
    timestamp.FromDatetime(d)
    task = {
        "schedule_time": timestamp,
        "http_request": {
            "http_method": tasks_v2.HttpMethod.POST,
            "url": url,
            "headers": {"Content-type": "application/json"},
            "body": payload_utf8
        }
    }
    response = client.create_task(request={"parent": queue, "task": task})
    return response
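# ----------------------------------------------------------------------------
# Offline check of the capping logic in check_project_slots, with a stub in
# place of ReservationServiceClient (the class names below are made up for
# this test). With 800 slots already committed and max_slots=1000, a request
# for 400 extra slots must be trimmed to 200.
if __name__ == '__main__':
    class _FakeCommit:
        def __init__(self, slot_count):
            self.slot_count = slot_count

    class _FakeClient:
        def list_capacity_commitments(self, parent):
            return [_FakeCommit(300), _FakeCommit(500)]

    assert check_project_slots(_FakeClient(), 'projects/p/locations/US', 400, 1000) == 200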
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Internal information about the scalar plugin.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function from tensorboard.plugins.scalar import plugin_data_pb2 from tensorboard.compat import tf PLUGIN_NAME = 'scalars' # The most recent value for the `version` field of the # `ScalarPluginData` proto. PROTO_VERSION = 0 def create_summary_metadata(display_name, description): """Create a `tf.SummaryMetadata` proto for scalar plugin data. Returns: A `tf.SummaryMetadata` protobuf object. """ content = plugin_data_pb2.ScalarPluginData(version=PROTO_VERSION) metadata = tf.SummaryMetadata( display_name=display_name, summary_description=description, plugin_data=tf.SummaryMetadata.PluginData( plugin_name=PLUGIN_NAME, content=content.SerializeToString())) return metadata def parse_plugin_metadata(content): """Parse summary metadata to a Python object. Arguments: content: The `content` field of a `SummaryMetadata` proto corresponding to the scalar plugin. Returns: A `ScalarPluginData` protobuf object. """ result = plugin_data_pb2.ScalarPluginData() # TODO(@jart): Instead of converting to bytes, assert that the input # is a bytestring, and raise a ValueError otherwise...but only after # converting `PluginData`'s `content` field to have type `bytes` # instead of `string`. result.ParseFromString(tf.compat.as_bytes(content)) if result.version == 0: return result else: tf.logging.warn( 'Unknown metadata version: %s. The latest version known to ' 'this build of TensorBoard is %s; perhaps a newer build is ' 'available?', result.version, PROTO_VERSION) return result
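# ----------------------------------------------------------------------------
# Round-trip sketch (illustrative only): metadata produced by
# create_summary_metadata should parse back to a ScalarPluginData carrying
# the current PROTO_VERSION. The display name and description are arbitrary.
def _roundtrip_self_check():
    metadata = create_summary_metadata('loss', 'training loss per step')
    parsed = parse_plugin_metadata(metadata.plugin_data.content)
    assert parsed.version == PROTO_VERSION


if __name__ == '__main__':
    _roundtrip_self_check()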
# Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """Tests for Volume Code.""" import datetime import enum import time from unittest import mock import uuid from castellan.common import exception as castellan_exception from castellan import key_manager import ddt import eventlet import os_brick.initiator.connectors.iscsi from oslo_concurrency import processutils from oslo_config import cfg from oslo_utils import imageutils import six from taskflow.engines.action_engine import engine from cinder.api import common from cinder import context from cinder import coordination from cinder import db from cinder import exception from cinder.message import message_field from cinder import objects from cinder.objects import fields from cinder.policies import volumes as vol_policy from cinder import quota from cinder.tests import fake_driver from cinder.tests.unit.api.v2 import fakes as v2_fakes from cinder.tests.unit import conf_fixture from cinder.tests.unit import fake_constants as fake from cinder.tests.unit import fake_snapshot from cinder.tests.unit import fake_volume from cinder.tests.unit.keymgr import fake as fake_keymgr from cinder.tests.unit import utils as tests_utils from cinder.tests.unit import volume as base from cinder import utils import cinder.volume from cinder.volume import driver from cinder.volume import manager as vol_manager from cinder.volume import rpcapi as volume_rpcapi import cinder.volume.targets.tgt from cinder.volume import volume_types QUOTAS = quota.QUOTAS CONF = cfg.CONF ENCRYPTION_PROVIDER = 'nova.volume.encryptors.cryptsetup.CryptsetupEncryptor' fake_opt = [ cfg.StrOpt('fake_opt1', default='fake', help='fake opts') ] def create_snapshot(volume_id, size=1, metadata=None, ctxt=None, **kwargs): """Create a snapshot object.""" metadata = metadata or {} snap = objects.Snapshot(ctxt or context.get_admin_context()) snap.volume_size = size snap.user_id = fake.USER_ID snap.project_id = fake.PROJECT_ID snap.volume_id = volume_id snap.status = fields.SnapshotStatus.CREATING if metadata is not None: snap.metadata = metadata snap.update(kwargs) snap.create() return snap @ddt.ddt class VolumeTestCase(base.BaseVolumeTestCase): def setUp(self): super(VolumeTestCase, self).setUp() self.patch('cinder.volume.volume_utils.clear_volume', autospec=True) self.expected_status = 'available' self.service_id = 1 self.user_context = context.RequestContext(user_id=fake.USER_ID, project_id=fake.PROJECT_ID) elevated = context.get_admin_context() db.volume_type_create(elevated, v2_fakes.fake_default_type_get( id=fake.VOLUME_TYPE2_ID)) self.vol_type = db.volume_type_get_by_name(elevated, '__DEFAULT__') def _create_volume(self, context, **kwargs): return tests_utils.create_volume( context, volume_type_id=volume_types.get_default_volume_type()['id'], **kwargs) @mock.patch('cinder.objects.service.Service.get_minimum_rpc_version') 
@mock.patch('cinder.objects.service.Service.get_minimum_obj_version') @mock.patch('cinder.rpc.LAST_RPC_VERSIONS', {'cinder-scheduler': '1.3'}) @mock.patch('cinder.rpc.LAST_OBJ_VERSIONS', {'cinder-scheduler': '1.4'}) def test_reset(self, get_min_obj, get_min_rpc): vol_mgr = vol_manager.VolumeManager() scheduler_rpcapi = vol_mgr.scheduler_rpcapi self.assertEqual('1.3', scheduler_rpcapi.client.version_cap) self.assertEqual('1.4', scheduler_rpcapi.client.serializer._base.version_cap) get_min_obj.return_value = objects.base.OBJ_VERSIONS.get_current() vol_mgr.reset() scheduler_rpcapi = vol_mgr.scheduler_rpcapi self.assertEqual(get_min_rpc.return_value, scheduler_rpcapi.client.version_cap) self.assertEqual(get_min_obj.return_value, scheduler_rpcapi.client.serializer._base.version_cap) self.assertIsNone(scheduler_rpcapi.client.serializer._base.manifest) @mock.patch('oslo_utils.importutils.import_object') def test_backend_availability_zone(self, mock_import_object): # NOTE(smcginnis): This isn't really the best place for this test, # but we don't currently have a pure VolumeManager test class. So # until we create a good suite for that class, putting here with # other tests that use VolumeManager. opts = { 'backend_availability_zone': 'caerbannog' } def conf_get(option): if option in opts: return opts[option] return None mock_driver = mock.Mock() mock_driver.configuration.safe_get.side_effect = conf_get mock_driver.configuration.extra_capabilities = 'null' def import_obj(*args, **kwargs): return mock_driver mock_import_object.side_effect = import_obj manager = vol_manager.VolumeManager(volume_driver=mock_driver) self.assertIsNotNone(manager) self.assertEqual(opts['backend_availability_zone'], manager.availability_zone) @mock.patch('cinder.volume.manager.VolumeManager._append_volume_stats', mock.Mock()) @mock.patch.object(vol_manager.VolumeManager, 'update_service_capabilities') def test_report_filter_goodness_function(self, mock_update): manager = vol_manager.VolumeManager() manager.driver.set_initialized() myfilterfunction = "myFilterFunction" mygoodnessfunction = "myGoodnessFunction" expected = {'name': 'cinder-volumes', 'filter_function': myfilterfunction, 'goodness_function': mygoodnessfunction, } with mock.patch.object(manager.driver, 'get_volume_stats') as m_get_stats: with mock.patch.object(manager.driver, 'get_goodness_function') as m_get_goodness: with mock.patch.object(manager.driver, 'get_filter_function') as m_get_filter: m_get_stats.return_value = {'name': 'cinder-volumes'} m_get_filter.return_value = myfilterfunction m_get_goodness.return_value = mygoodnessfunction manager._report_driver_status(context.get_admin_context()) self.assertTrue(m_get_stats.called) mock_update.assert_called_once_with(expected) def test_is_working(self): # By default we have driver mocked to be initialized... self.assertTrue(self.volume.is_working()) # ...lets switch it and check again! 
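        # Flipping the driver's private `_initialized` flag simulates an
        # uninitialized backend; the DriverNotInitialized tests below reuse
        # the same trick.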
self.volume.driver._initialized = False self.assertFalse(self.volume.is_working()) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch.object(QUOTAS, 'reserve') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(QUOTAS, 'rollback') def test_create_driver_not_initialized(self, reserve, commit, rollback, mock_notify): self.volume.driver._initialized = False def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit_and_rollback(context, reservations, project_id=None): pass reserve.return_value = fake_reserve commit.return_value = fake_commit_and_rollback rollback.return_value = fake_commit_and_rollback volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertIsNone(volume['encryption_key_id']) mock_notify.assert_not_called() self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, volume) volume = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual("error", volume.status) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_driver_not_initialized_rescheduling(self): self.volume.driver._initialized = False mock_delete = self.mock_object(self.volume.driver, 'delete_volume') volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, volume, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}}, self.volume.stats['pools']) # NOTE(dulek): As we've rescheduled, make sure delete_volume was # called. self.assertTrue(mock_delete.called) db.volume_destroy(context.get_admin_context(), volume_id) def test_create_non_cinder_exception_rescheduling(self): params = self.volume_params del params['host'] volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **params) volume_id = volume['id'] with mock.patch.object(self.volume.driver, 'create_volume', side_effect=processutils.ProcessExecutionError): self.assertRaises(processutils.ProcessExecutionError, self.volume.create_volume, self.context, volume, {'volume_properties': params}, {'retry': {'num_attempts': 1, 'host': []}}) # NOTE(dulek): Volume should be rescheduled as we passed request_spec # and filter_properties, assert that it wasn't counted in # allocated_capacity tracking. 
self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}}, self.volume.stats['pools']) db.volume_destroy(context.get_admin_context(), volume_id) @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch.object(QUOTAS, 'rollback') @mock.patch.object(QUOTAS, 'commit') @mock.patch.object(QUOTAS, 'reserve') def test_delete_driver_not_initialized(self, reserve, commit, rollback, mock_notify): self.volume.driver._initialized = False def fake_reserve(context, expire=None, project_id=None, **deltas): return ["RESERVATION"] def fake_commit_and_rollback(context, reservations, project_id=None): pass reserve.return_value = fake_reserve commit.return_value = fake_commit_and_rollback rollback.return_value = fake_commit_and_rollback volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) self.assertIsNone(volume['encryption_key_id']) mock_notify.assert_not_called() self.assertRaises(exception.DriverNotInitialized, self.volume.delete_volume, self.context, volume) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual("error_deleting", volume.status) volume.destroy() @mock.patch('cinder.tests.unit.fake_notifier.FakeNotifier._notify') @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.Mock()) @mock.patch('cinder.quota.QUOTAS.commit', new=mock.Mock()) @mock.patch('cinder.quota.QUOTAS.reserve', return_value=['RESERVATION']) def test_create_delete_volume(self, _mock_reserve, mock_notify): """Test volume can be created and deleted.""" volume = tests_utils.create_volume( self.context, availability_zone=CONF.storage_availability_zone, **self.volume_params) volume_id = volume['id'] mock_notify.assert_not_called() self.assertIsNone(volume['encryption_key_id']) self.volume.create_volume(self.context, volume) self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end']), any_order=True) self.assertEqual({'_pool0': {'allocated_capacity_gb': 1}}, self.volume.stats['pools']) self.volume.delete_volume(self.context, volume) vol = db.volume_get(context.get_admin_context(read_deleted='yes'), volume_id) self.assertEqual(vol['status'], 'deleted') self.assert_notify_called(mock_notify, (['INFO', 'volume.create.start'], ['INFO', 'volume.create.end'], ['INFO', 'volume.delete.start'], ['INFO', 'volume.delete.end']), any_order=True) self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}}, self.volume.stats['pools']) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume_id) def test_create_delete_volume_with_metadata(self): """Test volume can be created with metadata and deleted.""" test_meta = {'fake_key': 'fake_value'} volume = tests_utils.create_volume(self.context, metadata=test_meta, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) self.assertEqual(test_meta, volume.metadata) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume_id) def test_delete_volume_frozen(self): service = tests_utils.create_service(self.context, {'frozen': True}) volume = tests_utils.create_volume(self.context, host=service.host) self.assertRaises(exception.InvalidInput, self.volume_api.delete, self.context, volume) def test_delete_volume_another_cluster_fails(self): """Test delete of volume from another cluster fails.""" self.volume.cluster = 'mycluster' volume = tests_utils.create_volume(self.context, status='available', size=1, host=CONF.host + 'fake', 
                                           cluster_name=self.volume.cluster)
        self.volume.delete_volume(self.context, volume)
        self.assertRaises(exception.NotFound, db.volume_get,
                          self.context, volume.id)

    @mock.patch('cinder.db.volume_metadata_update')
    def test_create_volume_metadata(self, metadata_update):
        metadata = {'fake_key': 'fake_value'}
        metadata_update.return_value = metadata
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        res = self.volume_api.create_volume_metadata(self.context,
                                                     volume, metadata)
        metadata_update.assert_called_once_with(self.context, volume.id,
                                                metadata, False,
                                                common.METADATA_TYPES.user)
        self.assertEqual(metadata, res)

    @ddt.data('maintenance', 'uploading')
    def test_create_volume_metadata_maintenance(self, status):
        metadata = {'fake_key': 'fake_value'}
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        volume['status'] = status
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.create_volume_metadata,
                          self.context,
                          volume,
                          metadata)

    def test_update_volume_metadata_with_metatype(self):
        """Test update volume metadata with different metadata type."""
        test_meta1 = {'fake_key1': 'fake_value1'}
        test_meta2 = {'fake_key1': 'fake_value2'}
        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        self.volume.create_volume(self.context, volume)
        # update user metadata associated with the volume.
        result_meta = self.volume_api.update_volume_metadata(
            self.context,
            volume,
            test_meta2,
            False,
            common.METADATA_TYPES.user)
        self.assertEqual(test_meta2, result_meta)
        # create image metadata associated with the volume.
        result_meta = self.volume_api.update_volume_metadata(
            self.context,
            volume,
            test_meta1,
            False,
            common.METADATA_TYPES.image)
        self.assertEqual(test_meta1, result_meta)
        # update image metadata associated with the volume.
        result_meta = self.volume_api.update_volume_metadata(
            self.context,
            volume,
            test_meta2,
            False,
            common.METADATA_TYPES.image)
        self.assertEqual(test_meta2, result_meta)
        # update volume metadata with invalid metadata type.
        self.assertRaises(exception.InvalidMetadataType,
                          self.volume_api.update_volume_metadata,
                          self.context,
                          volume,
                          test_meta1,
                          False,
                          FAKE_METADATA_TYPE.fake_type)

    def test_update_volume_metadata_maintenance(self):
        """Test update volume metadata in maintenance."""
        test_meta1 = {'fake_key1': 'fake_value1'}
        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume['status'] = 'maintenance'
        self.assertRaises(exception.InvalidVolume,
                          self.volume_api.update_volume_metadata,
                          self.context,
                          volume,
                          test_meta1,
                          False,
                          FAKE_METADATA_TYPE.fake_type)

    @mock.patch('cinder.db.volume_update')
    def test_update_with_ovo(self, volume_update):
        """Test update volume using oslo_versionedobject."""
        volume = tests_utils.create_volume(self.context, **self.volume_params)
        updates = {'display_name': 'foobbar'}
        self.volume_api.update(self.context, volume, updates)
        volume_update.assert_called_once_with(self.context, volume.id,
                                              updates)
        self.assertEqual('foobbar', volume.display_name)

    def test_delete_volume_metadata_with_metatype(self):
        """Test delete volume metadata with different metadata type."""
        test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'}
        test_meta2 = {'fake_key1': 'fake_value1'}
        FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type')
        volume = tests_utils.create_volume(self.context, metadata=test_meta1,
                                           **self.volume_params)
        volume_id = volume['id']
        self.volume.create_volume(self.context, volume)
        # delete user metadata associated with the volume.
        self.volume_api.delete_volume_metadata(
            self.context,
            volume,
            'fake_key2',
            common.METADATA_TYPES.user)
        self.assertEqual(test_meta2,
                         db.volume_metadata_get(self.context, volume_id))
        # create image metadata associated with the volume.
        result_meta = self.volume_api.update_volume_metadata(
            self.context,
            volume,
            test_meta1,
            False,
            common.METADATA_TYPES.image)
        self.assertEqual(test_meta1, result_meta)
        # delete image metadata associated with the volume.
        self.volume_api.delete_volume_metadata(
            self.context,
            volume,
            'fake_key2',
            common.METADATA_TYPES.image)
        # parse the result to build the dict.
        rows = db.volume_glance_metadata_get(self.context, volume_id)
        result = {}
        for row in rows:
            result[row['key']] = row['value']
        self.assertEqual(test_meta2, result)
        # delete volume metadata with invalid metadata type.
self.assertRaises(exception.InvalidMetadataType, self.volume_api.delete_volume_metadata, self.context, volume, 'fake_key1', FAKE_METADATA_TYPE.fake_type) def test_delete_volume_metadata_maintenance(self): """Test delete volume metadata in maintenance.""" FAKE_METADATA_TYPE = enum.Enum('METADATA_TYPES', 'fake_type') test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' self.assertRaises(exception.InvalidVolume, self.volume_api.delete_volume_metadata, self.context, volume, 'fake_key1', FAKE_METADATA_TYPE.fake_type) def test_accept_transfer_maintenance(self): """Test accept transfer in maintenance.""" test_meta1 = {'fake_key1': 'fake_value1', 'fake_key2': 'fake_value2'} volume = tests_utils.create_volume(self.context, metadata=test_meta1, **self.volume_params) volume['status'] = 'maintenance' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.accept_transfer, self.context, volume, None, None) @mock.patch.object(cinder.volume.api.API, 'list_availability_zones') def test_create_volume_uses_default_availability_zone(self, mock_list_az): """Test setting availability_zone correctly during volume create.""" mock_list_az.return_value = ({'name': 'az1', 'available': True}, {'name': 'az2', 'available': True}, {'name': 'default-az', 'available': True}) volume_api = cinder.volume.api.API() # Test backwards compatibility, default_availability_zone not set self.override_config('storage_availability_zone', 'az2') volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=self.vol_type) self.assertEqual('az2', volume['availability_zone']) self.override_config('default_availability_zone', 'default-az') volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=self.vol_type) self.assertEqual('default-az', volume['availability_zone']) @mock.patch('cinder.quota.QUOTAS.rollback', new=mock.MagicMock()) @mock.patch('cinder.quota.QUOTAS.commit', new=mock.MagicMock()) @mock.patch('cinder.quota.QUOTAS.reserve', return_value=["RESERVATION"]) def test_create_volume_with_volume_type(self, _mock_reserve): """Test volume creation with default volume type.""" volume_api = cinder.volume.api.API() # Create volume with default volume type while default # volume type doesn't exist, volume_type_id should be NULL volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=self.vol_type) self.assertIsNone(volume['encryption_key_id']) # Create default volume type vol_type = conf_fixture.def_vol_type db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), vol_type) # Create volume with default volume type volume = volume_api.create(self.context, 1, 'name', 'description') self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) self.assertIsNone(volume['encryption_key_id']) # Create volume with specific volume type vol_type = 'test' db.volume_type_create(context.get_admin_context(), {'name': vol_type, 'extra_specs': {}}) db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), vol_type) volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) def test_create_volume_with_multiattach_volume_type(self): """Test volume creation with multiattach volume type.""" elevated = context.get_admin_context() volume_api = cinder.volume.api.API() especs = dict(multiattach="<is> 
True") volume_types.create(elevated, "multiattach-type", especs, description="test-multiattach") foo = objects.VolumeType.get_by_name_or_id(elevated, "multiattach-type") vol = volume_api.create(self.context, 1, 'admin-vol', 'description', volume_type=foo) self.assertEqual(foo['id'], vol['volume_type_id']) self.assertTrue(vol['multiattach']) def test_create_volume_with_multiattach_flag(self): """Tests creating a volume with multiattach=True but no special type. This tests the pre 3.50 microversion behavior of being able to create a volume with the multiattach request parameter regardless of a multiattach-capable volume type. """ volume_api = cinder.volume.api.API() volume = volume_api.create( self.context, 1, 'name', 'description', multiattach=True, volume_type=self.vol_type) self.assertTrue(volume.multiattach) def _fail_multiattach_policy_authorize(self, policy): if policy == vol_policy.MULTIATTACH_POLICY: raise exception.PolicyNotAuthorized(action='Test') def test_create_volume_with_multiattach_volume_type_not_authorized(self): """Test policy unauthorized create with multiattach volume type.""" elevated = context.get_admin_context() volume_api = cinder.volume.api.API() especs = dict(multiattach="<is> True") volume_types.create(elevated, "multiattach-type", especs, description="test-multiattach") foo = objects.VolumeType.get_by_name_or_id(elevated, "multiattach-type") with mock.patch.object(self.context, 'authorize') as mock_auth: mock_auth.side_effect = self._fail_multiattach_policy_authorize self.assertRaises(exception.PolicyNotAuthorized, volume_api.create, self.context, 1, 'admin-vol', 'description', volume_type=foo) def test_create_volume_with_multiattach_flag_not_authorized(self): """Test policy unauthorized create with multiattach flag.""" volume_api = cinder.volume.api.API() with mock.patch.object(self.context, 'authorize') as mock_auth: mock_auth.side_effect = self._fail_multiattach_policy_authorize self.assertRaises(exception.PolicyNotAuthorized, volume_api.create, self.context, 1, 'name', 'description', multiattach=True) @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_create_volume_with_encrypted_volume_type_multiattach(self): ctxt = context.get_admin_context() cipher = 'aes-xts-plain64' key_size = 256 control_location = 'front-end' db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS', 'extra_specs': {'multiattach': '<is> True'}}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': control_location, 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) volume_api = cinder.volume.api.API() db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS') self.assertRaises(exception.InvalidVolume, volume_api.create, self.context, 1, 'name', 'description', volume_type=db_vol_type) @ddt.data({'cipher': 'blowfish-cbc', 'algo': 'blowfish', 'length': 32}, {'cipher': 'aes-xts-plain64', 'algo': 'aes', 'length': 256}) @ddt.unpack @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_create_volume_with_encrypted_volume_types( self, cipher, algo, length): ctxt = context.get_admin_context() key_size = length control_location = 'front-end' db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': control_location, 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) volume_api = cinder.volume.api.API() 
db_vol_type = db.volume_type_get_by_name(ctxt, 'LUKS') volume = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) key_manager = volume_api.key_manager key = key_manager.get(self.context, volume['encryption_key_id']) self.assertEqual(key_size, len(key.get_encoded()) * 8) self.assertEqual(algo, key.algorithm) metadata = db.volume_encryption_metadata_get(self.context, volume.id) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) self.assertEqual(cipher, metadata.get('cipher')) self.assertEqual(key_size, metadata.get('key_size')) self.assertIsNotNone(volume['encryption_key_id']) def test_create_volume_with_provider_id(self): volume_params_with_provider_id = dict(provider_id=fake.PROVIDER_ID, **self.volume_params) volume = tests_utils.create_volume(self.context, **volume_params_with_provider_id) self.volume.create_volume(self.context, volume) self.assertEqual(fake.PROVIDER_ID, volume['provider_id']) def test_create_volume_with_admin_metadata(self): with mock.patch.object( self.volume.driver, 'create_volume', return_value={'admin_metadata': {'foo': 'bar'}}): volume = tests_utils.create_volume(self.user_context) self.volume.create_volume(self.user_context, volume) self.assertEqual({'foo': 'bar'}, volume['admin_metadata']) @mock.patch.object(key_manager, 'API', new=fake_keymgr.fake_api) def test_create_delete_volume_with_encrypted_volume_type(self): cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) db.volume_type_encryption_create( self.context, fake.VOLUME_TYPE_ID, {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') volume = self.volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) self.assertIsNotNone(volume.get('encryption_key_id', None)) self.assertEqual(db_vol_type.get('id'), volume['volume_type_id']) volume['host'] = 'fake_host' volume['status'] = 'available' db.volume_update(self.context, volume['id'], {'status': 'available'}) self.volume_api.delete(self.context, volume) volume = objects.Volume.get_by_id(self.context, volume.id) while volume.status == 'available': # Must wait for volume_api delete request to process enough to # change the volume status. 
time.sleep(0.5) volume.refresh() self.assertEqual('deleting', volume['status']) db.volume_destroy(self.context, volume['id']) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume['id']) @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_delete_encrypted_volume_fail_deleting_key(self): cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) db.volume_type_encryption_create( self.context, fake.VOLUME_TYPE_ID, {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') volume = self.volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) volume_id = volume['id'] volume['host'] = 'fake_host' volume['status'] = 'available' db.volume_update(self.context, volume_id, {'status': 'available'}) with mock.patch.object( self.volume_api.key_manager, 'delete', side_effect=Exception): self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) volume = objects.Volume.get_by_id(self.context, volume_id) self.assertEqual("error_deleting", volume.status) volume.destroy() @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_delete_encrypted_volume_key_not_found(self): cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(self.context, {'id': fake.VOLUME_TYPE_ID, 'name': 'LUKS'}) db.volume_type_encryption_create( self.context, fake.VOLUME_TYPE_ID, {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(self.context, 'LUKS') volume = self.volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) volume_id = volume['id'] volume['host'] = 'fake_host' volume['status'] = 'available' db.volume_update(self.context, volume_id, {'status': 'available'}) with mock.patch.object( self.volume_api.key_manager, 'delete', side_effect=castellan_exception.ManagedObjectNotFoundError): self.volume_api.delete(self.context, volume) volume = objects.Volume.get_by_id(self.context, volume_id) self.assertEqual("deleting", volume.status) volume.destroy() def test_delete_busy_volume(self): """Test volume survives deletion if driver reports it as busy.""" volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) with mock.patch.object(self.volume.driver, 'delete_volume', side_effect=exception.VolumeIsBusy( volume_name='fake') ) as mock_del_vol: self.volume.delete_volume(self.context, volume) volume_ref = db.volume_get(context.get_admin_context(), volume_id) self.assertEqual(volume_id, volume_ref.id) self.assertEqual("available", volume_ref.status) mock_del_vol.assert_called_once_with(volume) def test_unmanage_encrypted_volume_fails(self): volume = tests_utils.create_volume( self.context, encryption_key_id=fake.ENCRYPTION_KEY_ID, **self.volume_params) self.volume.create_volume(self.context, volume) manager = vol_manager.VolumeManager() self.assertRaises(exception.Invalid, manager.delete_volume, self.context, volume, unmanage_only=True) self.volume.delete_volume(self.context, volume) def test_get_volume_different_tenant(self): """Test can't get volume of another tenant when viewable_admin_meta.""" volume = tests_utils.create_volume(self.context, **self.volume_params) volume_id = volume['id'] self.volume.create_volume(self.context, volume) 
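        # Build a second, non-admin context for a different tenant: even with
        # viewable_admin_meta=True, the API must not expose another tenant's
        # volume, so the lookup below raises VolumeNotFound.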
another_context = context.RequestContext('another_user_id', 'another_project_id', is_admin=False) self.assertNotEqual(another_context.project_id, self.context.project_id) volume_api = cinder.volume.api.API() self.assertRaises(exception.VolumeNotFound, volume_api.get, another_context, volume_id, viewable_admin_meta=True) self.assertEqual(volume_id, volume_api.get(self.context, volume_id)['id']) self.volume.delete_volume(self.context, volume) def test_get_all_limit_bad_value(self): """Test value of 'limit' is numeric and >= 0""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.get_all, self.context, limit="A") self.assertRaises(exception.InvalidInput, volume_api.get_all, self.context, limit="-1") def test_get_all_tenants_volume_list(self): """Validate when the volume list for all tenants is returned""" volume_api = cinder.volume.api.API() with mock.patch.object(volume_api.db, 'volume_get_all_by_project') as by_project: with mock.patch.object(volume_api.db, 'volume_get_all') as get_all: db_volume = {'volume_type_id': fake.VOLUME_TYPE_ID, 'name': 'fake_name', 'host': 'fake_host', 'id': fake.VOLUME_ID} volume = fake_volume.fake_db_volume(**db_volume) by_project.return_value = [volume] get_all.return_value = [volume] volume_api.get_all(self.context, filters={'all_tenants': '0'}) self.assertTrue(by_project.called) by_project.called = False self.context.is_admin = False volume_api.get_all(self.context, filters={'all_tenants': '1'}) self.assertTrue(by_project.called) # check for volume list of all tenants self.context.is_admin = True volume_api.get_all(self.context, filters={'all_tenants': '1'}) self.assertTrue(get_all.called) def test_delete_volume_in_error_extending(self): """Test volume can be deleted in error_extending stats.""" # create a volume volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) # delete 'error_extending' volume db.volume_update(self.context, volume['id'], {'status': 'error_extending'}) self.volume.delete_volume(self.context, volume) self.assertRaises(exception.NotFound, db.volume_get, self.context, volume['id']) @mock.patch.object(db.sqlalchemy.api, 'volume_get', side_effect=exception.VolumeNotFound( volume_id='12345678-1234-5678-1234-567812345678')) def test_delete_volume_not_found(self, mock_get_volume): """Test delete volume moves on if the volume does not exist.""" volume_id = '12345678-1234-5678-1234-567812345678' volume = objects.Volume(self.context, status='available', id=volume_id) self.volume.delete_volume(self.context, volume) self.assertTrue(mock_get_volume.called) @mock.patch('cinder.volume.drivers.lvm.LVMVolumeDriver.' 
'create_volume_from_snapshot') def test_create_volume_from_snapshot(self, mock_create_from_snap): """Test volume can be created from a snapshot.""" volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume_src) snapshot_id = create_snapshot(volume_src['id'], size=volume_src['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) self.volume.create_snapshot(self.context, snapshot_obj) volume_dst = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) self.volume.create_volume(self.context, volume_dst) self.assertEqual(volume_dst['id'], db.volume_get( context.get_admin_context(), volume_dst['id']).id) self.assertEqual(snapshot_id, db.volume_get(context.get_admin_context(), volume_dst['id']).snapshot_id) self.volume.delete_volume(self.context, volume_dst) self.volume.delete_snapshot(self.context, snapshot_obj) self.volume.delete_volume(self.context, volume_src) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_volume_from_snapshot_with_types( self, _get_by_id, _get_flow): """Test volume create from snapshot with types including mistmatch.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', extra_specs={'volume_backend_name': 'dev_1'}) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='foo', extra_specs={'volume_backend_name': 'dev_2'}) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=10, volume_type_id=biz_type.id) source_vol.volume_type = biz_type snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 10, 'volume_type_id': biz_type.id} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) snapshot_obj.volume = source_vol # Make sure the case of specifying a type that # doesn't match the snapshots type fails self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) # Make sure that trying to specify a type # when the snapshots type is None fails snapshot_obj.volume_type_id = None snapshot_obj.volume.volume_type_id = None snapshot_obj.volume.volume_type = None self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) snapshot_obj.volume_type_id = foo_type.id snapshot_obj.volume.volume_type_id = foo_type.id snapshot_obj.volume.volume_type = foo_type volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_volume_from_source_with_types( self, _get_by_id, _get_flow): """Test volume create from source with types including mistmatch.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', extra_specs={'volume_backend_name': 'dev_1'}) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', extra_specs={'volume_backend_name': 'dev_2'}) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, 
status='available', volume_size=0, volume_type_id=biz_type.id) source_vol.volume_type = biz_type self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) # Make sure that trying to specify a type # when the source type is None fails source_vol.volume_type_id = None source_vol.volume_type = None self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) source_vol.volume_type_id = biz_type.id source_vol.volume_type = biz_type volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=biz_type, source_volume=source_vol) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_volume_from_source_with_same_backend( self, _get_by_id, _get_flow): """Test volume create from source with type mismatch same backend.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 40, 5, 408232), updated_at=None, extra_specs={'volume_backend_name': 'dev_1'}, is_public=True, deleted_at=None, description=None) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 20, 5, 408232), updated_at=None, extra_specs={'volume_backend_name': 'dev_1'}, is_public=True, deleted_at=None, description=None) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=10, volume_type_id=biz_type.id) source_vol.volume_type = biz_type volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) @mock.patch('cinder.volume.flows.api.create_volume.get_flow') @mock.patch('cinder.objects.volume.Volume.get_by_id') def test_create_from_source_and_snap_only_one_backend( self, _get_by_id, _get_flow): """Test create from source and snap with type mismatch one backend.""" volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 40, 5, 408232), updated_at=None, extra_specs={'some_key': 3}, is_public=True, deleted_at=None, description=None) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', qos_specs_id=None, deleted=False, created_at=datetime.datetime(2015, 5, 8, 0, 20, 5, 408232), updated_at=None, extra_specs={'some_other_key': 4}, is_public=True, deleted_at=None, description=None) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=10, volume_type_id=biz_type.id) source_vol.volume_type = biz_type snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 10, 'volume_type_id': biz_type['id']} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) snapshot_obj.volume = source_vol with mock.patch('cinder.db.service_get_all') as mock_get_service, \ mock.patch.object(volume_api, 'list_availability_zones') as mock_get_azs: mock_get_service.return_value = [ {'host': 'foo', 'uuid': 'a3a593da-7f8d-4bb7-8b4c-f2bc1e0b4824'}] 
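            # With service_get_all reporting a single backend and no extra
            # availability zones, the volume-type mismatch between biz_type
            # (source/snapshot) and foo_type (request) is tolerated, so both
            # create calls below succeed.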
mock_get_azs.return_value = {} volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=source_vol) volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, snapshot=snapshot_obj) def _test_create_from_source_snapshot_encryptions( self, is_snapshot=False): volume_api = cinder.volume.api.API() foo_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE_ID, name='foo', extra_specs={'volume_backend_name': 'dev_1'}) biz_type = fake_volume.fake_volume_type_obj( self.context, id=fake.VOLUME_TYPE2_ID, name='biz', extra_specs={'volume_backend_name': 'dev_1'}) source_vol = fake_volume.fake_volume_obj( self.context, id=fake.VOLUME_ID, status='available', volume_size=1, volume_type_id=biz_type.id) source_vol.volume_type = biz_type snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 1, 'volume_type_id': biz_type['id']} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) snapshot_obj.volume = source_vol with mock.patch.object( cinder.volume.volume_types, 'volume_types_encryption_changed') as mock_encryption_changed: mock_encryption_changed.return_value = True self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', volume_type=foo_type, source_volume=( source_vol if not is_snapshot else None), snapshot=snapshot_obj if is_snapshot else None) def test_create_from_source_encryption_changed(self): self._test_create_from_source_snapshot_encryptions() def test_create_from_snapshot_encryption_changed(self): self._test_create_from_source_snapshot_encryptions(is_snapshot=True) def _mock_synchronized(self, name, *s_args, **s_kwargs): def inner_sync1(f): def inner_sync2(*args, **kwargs): self.called.append('lock-%s' % (name)) ret = f(*args, **kwargs) self.called.append('unlock-%s' % (name)) return ret return inner_sync2 return inner_sync1 def _fake_execute(self, *cmd, **kwargs): pass @mock.patch.object(coordination.Coordinator, 'get_lock') @mock.patch.object(fake_driver.FakeLoggingVolumeDriver, 'create_volume_from_snapshot') def test_create_volume_from_snapshot_check_locks( self, mock_lvm_create, mock_lock): orig_flow = engine.ActionEngine.run def mock_flow_run(*args, **kwargs): # ensure the lock has been taken mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) # now proceed with the flow. 
ret = orig_flow(*args, **kwargs) return ret # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) # no lock self.volume.create_volume(self.context, src_vol) snap_id = create_snapshot(src_vol.id, size=src_vol['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) # no lock self.volume.create_snapshot(self.context, snapshot_obj) dst_vol = tests_utils.create_volume(self.context, snapshot_id=snap_id, **self.volume_params) admin_ctxt = context.get_admin_context() # mock the flow runner so we can do some checks self.mock_object(engine.ActionEngine, 'run', mock_flow_run) # locked self.volume.create_volume(self.context, dst_vol, request_spec={'snapshot_id': snap_id}) mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) self.assertEqual(dst_vol.id, db.volume_get(admin_ctxt, dst_vol.id).id) self.assertEqual(snap_id, db.volume_get(admin_ctxt, dst_vol.id).snapshot_id) # locked self.volume.delete_volume(self.context, dst_vol) mock_lock.assert_called_with('%s-delete_volume' % dst_vol.id) # locked self.volume.delete_snapshot(self.context, snapshot_obj) mock_lock.assert_called_with('%s-delete_snapshot' % snap_id) # locked self.volume.delete_volume(self.context, src_vol) mock_lock.assert_called_with('%s-delete_volume' % src_vol.id) self.assertTrue(mock_lvm_create.called) @mock.patch.object(coordination.Coordinator, 'get_lock') def test_create_volume_from_volume_check_locks(self, mock_lock): # mock the synchroniser so we can record events self.mock_object(utils, 'execute', self._fake_execute) orig_flow = engine.ActionEngine.run def mock_flow_run(*args, **kwargs): # ensure the lock has been taken mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) # now proceed with the flow. ret = orig_flow(*args, **kwargs) return ret # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] # no lock self.volume.create_volume(self.context, src_vol) self.assertEqual(0, mock_lock.call_count) dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) dst_vol_id = dst_vol['id'] admin_ctxt = context.get_admin_context() # mock the flow runner so we can do some checks self.mock_object(engine.ActionEngine, 'run', mock_flow_run) # locked self.volume.create_volume(self.context, dst_vol, request_spec={'source_volid': src_vol_id}) mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) self.assertEqual(dst_vol_id, db.volume_get(admin_ctxt, dst_vol_id).id) self.assertEqual(src_vol_id, db.volume_get(admin_ctxt, dst_vol_id).source_volid) # locked self.volume.delete_volume(self.context, dst_vol) mock_lock.assert_called_with('%s-delete_volume' % dst_vol_id) # locked self.volume.delete_volume(self.context, src_vol) mock_lock.assert_called_with('%s-delete_volume' % src_vol_id) def _raise_metadata_copy_failure(self, method, dst_vol): # MetadataCopyFailure exception will be raised if DB service is Down # while copying the volume glance metadata with mock.patch.object(db, method) as mock_db: mock_db.side_effect = exception.MetadataCopyFailure( reason="Because of DB service down.") self.assertRaises(exception.MetadataCopyFailure, self.volume.create_volume, self.context, dst_vol) # ensure that status of volume is 'error' vol = db.volume_get(self.context, dst_vol.id) self.assertEqual('error', vol['status']) # cleanup resource db.volume_destroy(self.context, dst_vol.id) @mock.patch('cinder.utils.execute') def 
test_create_volume_from_volume_with_glance_volume_metadata_none( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) # create volume from source volume dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) self.volume.create_volume(self.context, dst_vol) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_glance_metadata_copy_from_volume_to_volume, self.context, src_vol_id, dst_vol['id']) # ensure that status of volume is 'available' vol = db.volume_get(self.context, dst_vol['id']) self.assertEqual('available', vol['status']) # cleanup resource db.volume_destroy(self.context, src_vol_id) db.volume_destroy(self.context, dst_vol['id']) @mock.patch('cinder.utils.execute') def test_create_volume_from_volume_raise_metadata_copy_failure( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) # create volume from source volume dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) self._raise_metadata_copy_failure( 'volume_glance_metadata_copy_from_volume_to_volume', dst_vol) # cleanup resource db.volume_destroy(self.context, src_vol_id) @mock.patch('cinder.utils.execute') def test_create_volume_from_snapshot_raise_metadata_copy_failure( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) # create volume from snapshot snapshot_id = create_snapshot(src_vol['id'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) self.volume.create_snapshot(self.context, snapshot_obj) # ensure that status of snapshot is 'available' self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) dst_vol = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) self._raise_metadata_copy_failure( 'volume_glance_metadata_copy_to_volume', dst_vol) # cleanup resource snapshot_obj.destroy() db.volume_destroy(self.context, src_vol_id) @mock.patch('cinder.utils.execute') def test_create_volume_from_snapshot_with_glance_volume_metadata_none( self, mock_execute): # create source volume mock_execute.return_value = None src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) # set bootable flag of volume to True db.volume_update(self.context, src_vol['id'], {'bootable': True}) volume = db.volume_get(self.context, src_vol_id) # create snapshot of volume snapshot_id = create_snapshot(volume['id'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snapshot_id) self.volume.create_snapshot(self.context, snapshot_obj) # ensure that status of snapshot is 'available' self.assertEqual(fields.SnapshotStatus.AVAILABLE, snapshot_obj.status) # create volume from snapshot 
dst_vol = tests_utils.create_volume(self.context, snapshot_id=snapshot_id, **self.volume_params) self.volume.create_volume(self.context, dst_vol) self.assertRaises(exception.GlanceMetadataNotFound, db.volume_glance_metadata_copy_to_volume, self.context, dst_vol['id'], snapshot_id) # ensure that status of volume is 'available' vol = db.volume_get(self.context, dst_vol['id']) self.assertEqual('available', vol['status']) # cleanup resource snapshot_obj.destroy() db.volume_destroy(self.context, src_vol_id) db.volume_destroy(self.context, dst_vol['id']) @ddt.data({'connector_class': os_brick.initiator.connectors.iscsi.ISCSIConnector, 'rekey_supported': True, 'already_encrypted': 'yes'}, {'connector_class': os_brick.initiator.connectors.iscsi.ISCSIConnector, 'rekey_supported': True, 'already_encrypted': 'no'}, {'connector_class': os_brick.initiator.connectors.rbd.RBDConnector, 'rekey_supported': False, 'already_encrypted': 'no'}) @ddt.unpack @mock.patch('cinder.volume.volume_utils.delete_encryption_key') @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask._setup_encryption_keys') @mock.patch('cinder.db.sqlalchemy.api.volume_encryption_metadata_get') @mock.patch('cinder.image.image_utils.qemu_img_info') @mock.patch('cinder.volume.driver.VolumeDriver._detach_volume') @mock.patch('cinder.volume.driver.VolumeDriver._attach_volume') @mock.patch('cinder.utils.brick_get_connector_properties') @mock.patch('cinder.utils.execute') def test_create_volume_from_volume_with_enc( self, mock_execute, mock_brick_gcp, mock_at, mock_det, mock_qemu_img_info, mock_enc_metadata_get, mock_setup_enc_keys, mock_del_enc_key, connector_class=None, rekey_supported=None, already_encrypted=None): # create source volume mock_execute.return_value = ('', '') mock_enc_metadata_get.return_value = {'cipher': 'aes-xts-plain64', 'key_size': 256, 'provider': 'luks'} mock_setup_enc_keys.return_value = ( 'qwert', 'asdfg', fake.ENCRYPTION_KEY2_ID) params = {'status': 'creating', 'size': 1, 'host': CONF.host, 'encryption_key_id': fake.ENCRYPTION_KEY_ID} src_vol = tests_utils.create_volume(self.context, **params) src_vol_id = src_vol['id'] self.volume.create_volume(self.context, src_vol) db.volume_update(self.context, src_vol['id'], {'encryption_key_id': fake.ENCRYPTION_KEY_ID}) # create volume from source volume params['encryption_key_id'] = fake.ENCRYPTION_KEY2_ID attach_info = { 'connector': connector_class(None), 'device': {'path': '/some/device/thing'}} mock_at.return_value = (attach_info, src_vol) img_info = imageutils.QemuImgInfo() if already_encrypted: # defaults to None when not encrypted img_info.encrypted = 'yes' img_info.file_format = 'raw' mock_qemu_img_info.return_value = img_info dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **params) self.volume.create_volume(self.context, dst_vol) # ensure that status of volume is 'available' vol = db.volume_get(self.context, dst_vol['id']) self.assertEqual('available', vol['status']) # cleanup resource db.volume_destroy(self.context, src_vol_id) db.volume_destroy(self.context, dst_vol['id']) if rekey_supported: mock_setup_enc_keys.assert_called_once_with( mock.ANY, src_vol, {'key_size': 256, 'provider': 'luks', 'cipher': 'aes-xts-plain64'} ) if already_encrypted: mock_execute.assert_called_once_with( 'cryptsetup', 'luksChangeKey', '/some/device/thing', '--force-password', log_errors=processutils.LOG_ALL_ERRORS, process_input='qwert\nasdfg\n', run_as_root=True) else: mock_execute.assert_called_once_with( 'cryptsetup', 
'--batch-mode', 'luksFormat', '--type', 'luks1', '--cipher', 'aes-xts-plain64', '--key-size', '256', '--key-file=-', '/some/device/thing', process_input='asdfg', run_as_root=True) mock_del_enc_key.assert_called_once_with(mock.ANY, # context mock.ANY, # keymgr fake.ENCRYPTION_KEY2_ID) else: mock_setup_enc_keys.assert_not_called() mock_execute.assert_not_called() mock_del_enc_key.assert_not_called() mock_at.assert_called() mock_det.assert_called() @mock.patch.object(key_manager, 'API', fake_keymgr.fake_api) def test_create_volume_from_snapshot_with_encryption(self): """Test volume can be created from a snapshot of an encrypted volume""" ctxt = context.get_admin_context() cipher = 'aes-xts-plain64' key_size = 256 db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) volume_api = cinder.volume.api.API() db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), 'LUKS') volume_src = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) db.volume_update(self.context, volume_src['id'], {'host': 'fake_host@fake_backend', 'status': 'available'}) volume_src = objects.Volume.get_by_id(self.context, volume_src['id']) snapshot_ref = volume_api.create_snapshot_force(self.context, volume_src, 'name', 'description') snapshot_ref['status'] = fields.SnapshotStatus.AVAILABLE # status must be available volume_dst = volume_api.create(self.context, 1, 'name', 'description', snapshot=snapshot_ref) self.assertEqual(volume_dst['id'], db.volume_get( context.get_admin_context(), volume_dst['id']).id) self.assertEqual(snapshot_ref['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).snapshot_id) # ensure encryption keys match self.assertIsNotNone(volume_src['encryption_key_id']) self.assertIsNotNone(volume_dst['encryption_key_id']) key_manager = volume_api.key_manager # must use *same* key manager volume_src_key = key_manager.get(self.context, volume_src['encryption_key_id']) volume_dst_key = key_manager.get(self.context, volume_dst['encryption_key_id']) self.assertEqual(volume_src_key, volume_dst_key) def test_create_volume_from_encrypted_volume(self): """Test volume can be created from an encrypted volume.""" self.mock_object(key_manager, 'API', fake_keymgr.fake_api) cipher = 'aes-xts-plain64' key_size = 256 volume_api = cinder.volume.api.API() ctxt = context.get_admin_context() db.volume_type_create(ctxt, {'id': '61298380-0c12-11e3-bfd6-4b48424183be', 'name': 'LUKS'}) db.volume_type_encryption_create( ctxt, '61298380-0c12-11e3-bfd6-4b48424183be', {'control_location': 'front-end', 'provider': ENCRYPTION_PROVIDER, 'cipher': cipher, 'key_size': key_size}) db_vol_type = db.volume_type_get_by_name(context.get_admin_context(), 'LUKS') volume_src = volume_api.create(self.context, 1, 'name', 'description', volume_type=db_vol_type) db.volume_update(self.context, volume_src['id'], {'host': 'fake_host@fake_backend', 'status': 'available'}) volume_src = objects.Volume.get_by_id(self.context, volume_src['id']) volume_dst = volume_api.create(self.context, 1, 'name', 'description', source_volume=volume_src) self.assertEqual(volume_dst['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).id) self.assertEqual(volume_src['id'], db.volume_get(context.get_admin_context(), volume_dst['id']).source_volid) # ensure encryption keys match 
self.assertIsNotNone(volume_src['encryption_key_id']) self.assertIsNotNone(volume_dst['encryption_key_id']) km = volume_api.key_manager # must use *same* key manager volume_src_key = km.get(self.context, volume_src['encryption_key_id']) volume_dst_key = km.get(self.context, volume_dst['encryption_key_id']) self.assertEqual(volume_src_key, volume_dst_key) def test_delete_invalid_status_fails(self): self.volume_params['status'] = 'invalid1234' volume = tests_utils.create_volume(self.context, **self.volume_params) vol_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, vol_api.delete, self.context, volume) def test_create_volume_from_snapshot_fail_bad_size(self): """Test volume can't be created from snapshot with bad volume size.""" volume_api = cinder.volume.api.API() snapshot = {'id': fake.SNAPSHOT_ID, 'status': fields.SnapshotStatus.AVAILABLE, 'volume_size': 10} snapshot_obj = fake_snapshot.fake_snapshot_obj(self.context, **snapshot) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', snapshot=snapshot_obj) def test_create_volume_from_snapshot_fail_wrong_az(self): """Test volume can't be created from snapshot in a different az.""" volume_api = cinder.volume.api.API() def fake_list_availability_zones(enable_cache=False): return ({'name': 'nova', 'available': True}, {'name': 'az2', 'available': True}) self.mock_object(volume_api, 'list_availability_zones', fake_list_availability_zones) volume_src = tests_utils.create_volume(self.context, availability_zone='az2', **self.volume_params) self.volume.create_volume(self.context, volume_src) snapshot = create_snapshot(volume_src['id']) self.volume.create_snapshot(self.context, snapshot) volume_dst = volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', snapshot=snapshot) self.assertEqual('az2', volume_dst['availability_zone']) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', snapshot=snapshot, availability_zone='nova') def test_create_volume_with_invalid_exclusive_options(self): """Test volume create with multiple exclusive options fails.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, 1, 'name', 'description', snapshot=fake.SNAPSHOT_ID, image_id=fake.IMAGE_ID, source_volume=fake.VOLUME_ID) def test_reserve_volume_success(self): volume = tests_utils.create_volume(self.context, status='available') cinder.volume.api.API().reserve_volume(self.context, volume) volume_db = db.volume_get(self.context, volume.id) self.assertEqual('attaching', volume_db.status) db.volume_destroy(self.context, volume.id) def test_reserve_volume_in_attaching(self): self._test_reserve_volume_bad_status('attaching') def test_reserve_volume_in_maintenance(self): self._test_reserve_volume_bad_status('maintenance') def _test_reserve_volume_bad_status(self, status): volume = tests_utils.create_volume(self.context, status=status) self.assertRaises(exception.InvalidVolume, cinder.volume.api.API().reserve_volume, self.context, volume) db.volume_destroy(self.context, volume.id) def test_attachment_reserve_with_bootable_volume(self): # test the private _attachment_reserve method with a bootable, # in-use, multiattach volume. 
instance_uuid = fake.UUID1 volume = tests_utils.create_volume(self.context, status='in-use') tests_utils.attach_volume(self.context, volume.id, instance_uuid, 'attached_host', 'mountpoint', mode='rw') volume.multiattach = True volume.bootable = True attachment = self.volume_api._attachment_reserve( self.context, volume, instance_uuid) self.assertEqual(attachment.attach_status, 'reserved') def test_attachment_reserve_conditional_update_attach_race(self): # Tests a scenario where two instances are racing to attach the # same multiattach=False volume. One updates the volume status to # "reserved" but the other fails the conditional update which is # then validated to not be the same instance that is already attached # to the multiattach=False volume which triggers a failure. volume = tests_utils.create_volume(self.context) # Assert that we're not dealing with a multiattach volume and that # it does not have any existing attachments. self.assertFalse(volume.multiattach) self.assertEqual(0, len(volume.volume_attachment)) # Attach the first instance which is OK and should update the volume # status to 'reserved'. self.volume_api._attachment_reserve(self.context, volume, fake.UUID1) # Try attaching a different instance to the same volume which should # fail. ex = self.assertRaises(exception.InvalidVolume, self.volume_api._attachment_reserve, self.context, volume, fake.UUID2) self.assertIn("status must be available or downloading", six.text_type(ex)) def test_attachment_reserve_with_instance_uuid_error_volume(self): # Tests that trying to create an attachment (with an instance_uuid # provided) on a volume that's not 'available' or 'downloading' status # will fail if the volume does not have any attachments, similar to how # the volume reserve action works. volume = tests_utils.create_volume(self.context, status='error') # Assert that we're not dealing with a multiattach volume and that # it does not have any existing attachments. self.assertFalse(volume.multiattach) self.assertEqual(0, len(volume.volume_attachment)) # Try attaching an instance to the volume which should fail based on # the volume status. 
ex = self.assertRaises(exception.InvalidVolume, self.volume_api._attachment_reserve, self.context, volume, fake.UUID1) self.assertIn("status must be available or downloading", six.text_type(ex)) def test_unreserve_volume_success_in_use(self): UUID = six.text_type(uuid.uuid4()) volume = tests_utils.create_volume(self.context, status='attaching') tests_utils.attach_volume(self.context, volume.id, UUID, 'attached_host', 'mountpoint', mode='rw') cinder.volume.api.API().unreserve_volume(self.context, volume) db_volume = db.volume_get(self.context, volume.id) self.assertEqual('in-use', db_volume.status) def test_unreserve_volume_success_available(self): volume = tests_utils.create_volume(self.context, status='attaching') cinder.volume.api.API().unreserve_volume(self.context, volume) db_volume = db.volume_get(self.context, volume.id) self.assertEqual('available', db_volume.status) def test_multi_node(self): # TODO(termie): Figure out how to test with two nodes, # each of them having a different FLAG for storage_node # This will allow us to test cross-node interactions pass def test_cannot_delete_volume_in_use(self): """Test volume can't be deleted in in-use status.""" self._test_cannot_delete_volume('in-use') def test_cannot_delete_volume_maintenance(self): """Test volume can't be deleted in maintenance status.""" self._test_cannot_delete_volume('maintenance') def _test_cannot_delete_volume(self, status): """Test volume can't be deleted in invalid stats.""" # create a volume and assign to host volume = tests_utils.create_volume(self.context, CONF.host, status=status) # 'in-use' status raises InvalidVolume self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) # clean up self.volume.delete_volume(self.context, volume) def test_force_delete_volume(self): """Test volume can be forced to delete.""" # create a volume and assign to host self.volume_params['status'] = 'error_deleting' volume = tests_utils.create_volume(self.context, **self.volume_params) # 'error_deleting' volumes can't be deleted self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume) # delete with force self.volume_api.delete(self.context, volume, force=True) # status is deleting volume = objects.Volume.get_by_id(context.get_admin_context(), volume.id) self.assertEqual('deleting', volume.status) # clean up self.volume.delete_volume(self.context, volume) def test_cannot_force_delete_attached_volume(self): """Test volume can't be force delete in attached state.""" volume = tests_utils.create_volume(self.context, CONF.host, status='in-use', attach_status= fields.VolumeAttachStatus.ATTACHED) self.assertRaises(exception.InvalidVolume, self.volume_api.delete, self.context, volume, force=True) db.volume_destroy(self.context, volume.id) def test__revert_to_snapshot_generic_failed(self): fake_volume = tests_utils.create_volume(self.context, status='available') fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume.id) with mock.patch.object( self.volume.driver, '_create_temp_volume_from_snapshot') as mock_temp, \ mock.patch.object( self.volume.driver, 'delete_volume') as mock_driver_delete, \ mock.patch.object( self.volume, '_copy_volume_data') as mock_copy: temp_volume = tests_utils.create_volume(self.context, status='available') mock_copy.side_effect = [exception.VolumeDriverException('error')] mock_temp.return_value = temp_volume self.assertRaises(exception.VolumeDriverException, self.volume._revert_to_snapshot_generic, self.context, fake_volume, 
fake_snapshot) mock_copy.assert_called_once_with( self.context, temp_volume, fake_volume) mock_driver_delete.assert_called_once_with(temp_volume) def test__revert_to_snapshot_generic(self): fake_volume = tests_utils.create_volume(self.context, status='available') fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume.id) with mock.patch.object( self.volume.driver, '_create_temp_volume_from_snapshot') as mock_temp,\ mock.patch.object( self.volume.driver, 'delete_volume') as mock_driver_delete,\ mock.patch.object( self.volume, '_copy_volume_data') as mock_copy: temp_volume = tests_utils.create_volume(self.context, status='available') mock_temp.return_value = temp_volume self.volume._revert_to_snapshot_generic( self.context, fake_volume, fake_snapshot) mock_copy.assert_called_once_with( self.context, temp_volume, fake_volume) mock_driver_delete.assert_called_once_with(temp_volume) @ddt.data({'driver_error': True}, {'driver_error': False}) @ddt.unpack def test__revert_to_snapshot(self, driver_error): mock.patch.object(self.volume, '_notify_about_snapshot_usage') with mock.patch.object(self.volume.driver, 'revert_to_snapshot') as driver_revert, \ mock.patch.object(self.volume, '_notify_about_volume_usage'), \ mock.patch.object(self.volume, '_notify_about_snapshot_usage'),\ mock.patch.object(self.volume, '_revert_to_snapshot_generic') as generic_revert: if driver_error: driver_revert.side_effect = [NotImplementedError] else: driver_revert.return_value = None self.volume._revert_to_snapshot(self.context, {}, {}) driver_revert.assert_called_once_with(self.context, {}, {}) if driver_error: generic_revert.assert_called_once_with(self.context, {}, {}) @ddt.data({}, {'has_snapshot': True}, {'use_temp_snapshot': True}, {'use_temp_snapshot': True, 'has_snapshot': True}) @ddt.unpack def test_revert_to_snapshot(self, has_snapshot=False, use_temp_snapshot=False): fake_volume = tests_utils.create_volume(self.context, status='reverting', project_id='123', size=2) fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume['id'], status='restoring', volume_size=1) with mock.patch.object(self.volume, '_revert_to_snapshot') as _revert,\ mock.patch.object(self.volume, '_create_backup_snapshot') as _create_snapshot,\ mock.patch.object(self.volume, 'delete_snapshot') as _delete_snapshot, \ mock.patch.object(self.volume.driver, 'snapshot_revert_use_temp_snapshot') as \ _use_temp_snap: _revert.return_value = None _use_temp_snap.return_value = use_temp_snapshot if has_snapshot: _create_snapshot.return_value = {'id': 'fake_snapshot'} else: _create_snapshot.return_value = None self.volume.revert_to_snapshot(self.context, fake_volume, fake_snapshot) _revert.assert_called_once_with(self.context, fake_volume, fake_snapshot) if not use_temp_snapshot: _create_snapshot.assert_not_called() else: _create_snapshot.assert_called_once_with(self.context, fake_volume) if use_temp_snapshot and has_snapshot: _delete_snapshot.assert_called_once_with( self.context, {'id': 'fake_snapshot'}, handle_quota=False) else: _delete_snapshot.assert_not_called() fake_volume.refresh() fake_snapshot.refresh() self.assertEqual('available', fake_volume['status']) self.assertEqual('available', fake_snapshot['status']) self.assertEqual(2, fake_volume['size']) def test_revert_to_snapshot_failed(self): fake_volume = tests_utils.create_volume(self.context, status='reverting', project_id='123', size=2) fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume['id'], status='restoring', volume_size=1) with 
mock.patch.object(self.volume, '_revert_to_snapshot') as _revert, \ mock.patch.object(self.volume, '_create_backup_snapshot'), \ mock.patch.object(self.volume, 'delete_snapshot') as _delete_snapshot: _revert.side_effect = [exception.VolumeDriverException( message='fake_message')] self.assertRaises(exception.VolumeDriverException, self.volume.revert_to_snapshot, self.context, fake_volume, fake_snapshot) _revert.assert_called_once_with(self.context, fake_volume, fake_snapshot) _delete_snapshot.assert_not_called() fake_volume.refresh() fake_snapshot.refresh() self.assertEqual('error', fake_volume['status']) self.assertEqual('available', fake_snapshot['status']) self.assertEqual(2, fake_volume['size']) def test_cannot_revert_to_snapshot_in_use(self): """Test volume can't be reverted to snapshot in in-use status.""" fake_volume = tests_utils.create_volume(self.context, status='in-use') fake_snapshot = tests_utils.create_snapshot(self.context, fake_volume.id, status='available') self.assertRaises(exception.InvalidVolume, self.volume_api.revert_to_snapshot, self.context, fake_volume, fake_snapshot) def test_cannot_delete_volume_with_snapshots(self): """Test volume can't be deleted with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.context, volume) self.volume.delete_snapshot(self.context, snapshot) self.volume.delete_volume(self.context, volume) def test_can_delete_errored_snapshot(self): """Test snapshot can be created and deleted.""" volume = tests_utils.create_volume(self.context, CONF.host) snapshot = create_snapshot(volume.id, size=volume['size'], ctxt=self.context, status=fields.SnapshotStatus.ERROR) self.volume_api.delete_snapshot(self.context, snapshot) self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) self.volume.delete_volume(self.context, volume) def test_create_snapshot_set_worker(self): volume = tests_utils.create_volume(self.context) snapshot = create_snapshot(volume.id, size=volume['size'], ctxt=self.context, status=fields.SnapshotStatus.CREATING) self.volume.create_snapshot(self.context, snapshot) volume.set_worker.assert_called_once_with() def test_cannot_delete_snapshot_with_bad_status(self): volume = tests_utils.create_volume(self.context, CONF.host) snapshot = create_snapshot(volume.id, size=volume['size'], ctxt=self.context, status=fields.SnapshotStatus.CREATING) self.assertRaises(exception.InvalidSnapshot, self.volume_api.delete_snapshot, self.context, snapshot) snapshot.status = fields.SnapshotStatus.ERROR snapshot.save() self.volume_api.delete_snapshot(self.context, snapshot) self.assertEqual(fields.SnapshotStatus.DELETING, snapshot.status) self.volume.delete_volume(self.context, volume) @mock.patch.object(QUOTAS, "rollback") @mock.patch.object(QUOTAS, "commit") @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) def _do_test_create_volume_with_size(self, size, *_unused_quota_mocks): volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, size, 'name', 'description', volume_type=self.vol_type) self.assertEqual(int(size), volume['size']) def 
test_create_volume_int_size(self): """Test volume creation with int size.""" self._do_test_create_volume_with_size(2) def test_create_volume_string_size(self): """Test volume creation with string size.""" self._do_test_create_volume_with_size('2') @mock.patch.object(QUOTAS, "rollback") @mock.patch.object(QUOTAS, "commit") @mock.patch.object(QUOTAS, "reserve", return_value=["RESERVATION"]) def test_create_volume_with_bad_size(self, *_unused_quota_mocks): volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '2Gb', 'name', 'description') def test_create_volume_with_float_fails(self): """Test volume creation with invalid float size.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '1.5', 'name', 'description') def test_create_volume_with_zero_size_fails(self): """Test volume creation with string size.""" volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidInput, volume_api.create, self.context, '0', 'name', 'description') def test_begin_detaching_fails_available(self): volume_api = cinder.volume.api.API() volume = tests_utils.create_volume(self.context, status='available') # Volume status is 'available'. self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_update(self.context, volume.id, {'status': 'in-use', 'attach_status': fields.VolumeAttachStatus.DETACHED}) # Should raise an error since not attached self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_update(self.context, volume.id, {'attach_status': fields.VolumeAttachStatus.ATTACHED}) # Ensure when attached no exception raised volume_api.begin_detaching(self.context, volume) volume_api.update(self.context, volume, {'status': 'maintenance'}) self.assertRaises(exception.InvalidVolume, volume_api.begin_detaching, self.context, volume) db.volume_destroy(self.context, volume.id) def test_begin_roll_detaching_volume(self): """Test begin_detaching and roll_detaching functions.""" instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, **self.volume_params) attachment = db.volume_attach(self.context, {'volume_id': volume['id'], 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment['id'], instance_uuid, 'fake-host', 'vdb') volume_api = cinder.volume.api.API() volume_api.begin_detaching(self.context, volume) volume = volume_api.get(self.context, volume['id']) self.assertEqual("detaching", volume['status']) volume_api.roll_detaching(self.context, volume) volume = volume_api.get(self.context, volume['id']) self.assertEqual("in-use", volume['status']) def test_volume_api_update(self): # create a raw vol volume = tests_utils.create_volume(self.context, **self.volume_params) # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} volume_api.update(self.context, volume, update_dict) # read changes from db vol = db.volume_get(context.get_admin_context(), volume['id']) self.assertEqual('test update name', vol['display_name']) def test_volume_api_update_maintenance(self): # create a raw vol volume = tests_utils.create_volume(self.context, **self.volume_params) volume['status'] = 'maintenance' # use volume.api to update name volume_api = cinder.volume.api.API() update_dict = {'display_name': 'test update name'} self.assertRaises(exception.InvalidVolume, volume_api.update, self.context, 
volume, update_dict) def test_volume_api_get_list_volumes_image_metadata(self): """Test get_list_volumes_image_metadata in volume API.""" ctxt = context.get_admin_context() db.volume_create(ctxt, {'id': 'fake1', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, 'fake1', 'key1', 'value1') db.volume_glance_metadata_create(ctxt, 'fake1', 'key2', 'value2') db.volume_create(ctxt, {'id': 'fake2', 'status': 'available', 'host': 'test', 'provider_location': '', 'size': 1, 'volume_type_id': fake.VOLUME_TYPE_ID}) db.volume_glance_metadata_create(ctxt, 'fake2', 'key3', 'value3') db.volume_glance_metadata_create(ctxt, 'fake2', 'key4', 'value4') volume_api = cinder.volume.api.API() results = volume_api.get_list_volumes_image_metadata(ctxt, ['fake1', 'fake2']) expect_results = {'fake1': {'key1': 'value1', 'key2': 'value2'}, 'fake2': {'key3': 'value3', 'key4': 'value4'}} self.assertEqual(expect_results, results) @mock.patch.object(QUOTAS, 'limit_check') @mock.patch.object(QUOTAS, 'reserve') def test_extend_attached_volume(self, reserve, limit_check): volume = self._create_volume(self.context, size=2, status='available', host=CONF.host) volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api._extend, self.context, volume, 3, attached=True) db.volume_update(self.context, volume.id, {'status': 'in-use'}) volume.refresh() reserve.return_value = ["RESERVATION"] volume_api._extend(self.context, volume, 3, attached=True) volume.refresh() self.assertEqual('extending', volume.status) self.assertEqual('in-use', volume.previous_status) reserve.assert_called_once_with(self.context, gigabytes=1, gigabytes___DEFAULT__=1, project_id=volume.project_id) limit_check.side_effect = None reserve.side_effect = None db.volume_update(self.context, volume.id, {'status': 'in-use'}) volume_api.scheduler_rpcapi = mock.MagicMock() volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock() volume_api._extend(self.context, volume, 3, attached=True) request_spec = { 'volume_properties': volume, 'volume_type': self.vol_type, 'volume_id': volume.id } volume_api.scheduler_rpcapi.extend_volume.assert_called_once_with( self.context, volume, 3, ["RESERVATION"], request_spec) # clean up self.volume.delete_volume(self.context, volume) @mock.patch.object(QUOTAS, 'limit_check') @mock.patch.object(QUOTAS, 'reserve') def test_extend_volume(self, reserve, limit_check): """Test volume can be extended at API level.""" # create a volume and assign to host volume = self._create_volume(self.context, size=2, status='in-use', host=CONF.host) volume_api = cinder.volume.api.API() # Extend fails when status != available self.assertRaises(exception.InvalidVolume, volume_api._extend, self.context, volume, 3) db.volume_update(self.context, volume.id, {'status': 'available'}) volume.refresh() # Extend fails when new_size < orig_size self.assertRaises(exception.InvalidInput, volume_api._extend, self.context, volume, 1) # Extend fails when new_size == orig_size self.assertRaises(exception.InvalidInput, volume_api._extend, self.context, volume, 2) # works when new_size > orig_size reserve.return_value = ["RESERVATION"] volume_api._extend(self.context, volume, 3) volume.refresh() self.assertEqual('extending', volume.status) self.assertEqual('available', volume.previous_status) reserve.assert_called_once_with(self.context, gigabytes=1, gigabytes___DEFAULT__=1, project_id=volume.project_id) # Test the quota exceeded 
db.volume_update(self.context, volume.id, {'status': 'available'}) reserve.side_effect = exception.OverQuota(overs=['gigabytes'], quotas={'gigabytes': 20}, usages={'gigabytes': {'reserved': 5, 'in_use': 15}}) self.assertRaises(exception.VolumeSizeExceedsAvailableQuota, volume_api._extend, self.context, volume, 3) db.volume_update(self.context, volume.id, {'status': 'available'}) limit_check.side_effect = exception.OverQuota( overs=['per_volume_gigabytes'], quotas={'per_volume_gigabytes': 2}) self.assertRaises(exception.VolumeSizeExceedsLimit, volume_api._extend, self.context, volume, 3) # Test scheduler path limit_check.side_effect = None reserve.side_effect = None db.volume_update(self.context, volume.id, {'status': 'available'}) volume_api.scheduler_rpcapi = mock.MagicMock() volume_api.scheduler_rpcapi.extend_volume = mock.MagicMock() volume_api._extend(self.context, volume, 3) request_spec = { 'volume_properties': volume, 'volume_type': self.vol_type, 'volume_id': volume.id } volume_api.scheduler_rpcapi.extend_volume.assert_called_once_with( self.context, volume, 3, ["RESERVATION"], request_spec) # clean up self.volume.delete_volume(self.context, volume) def test_extend_volume_driver_not_initialized(self): """Test volume can be extended at API level.""" # create a volume and assign to host fake_reservations = ['RESERVATION'] volume = tests_utils.create_volume(self.context, size=2, status='available', host=CONF.host) self.volume.create_volume(self.context, volume) self.volume.driver._initialized = False self.assertRaises(exception.DriverNotInitialized, self.volume.extend_volume, self.context, volume, 3, fake_reservations) volume.refresh() self.assertEqual('error_extending', volume.status) # lets cleanup the mess. self.volume.driver._initialized = True self.volume.delete_volume(self.context, volume) def _test_extend_volume_manager_fails_with_exception(self, volume): fake_reservations = ['RESERVATION'] # Test driver exception with mock.patch.object( self.volume.driver, 'extend_volume', side_effect=exception.CinderException('fake exception')): with mock.patch.object( self.volume.message_api, 'create') as mock_create: volume['status'] = 'extending' self.volume.extend_volume(self.context, volume, '4', fake_reservations) volume.refresh() self.assertEqual(2, volume.size) self.assertEqual('error_extending', volume.status) mock_create.assert_called_once_with( self.context, message_field.Action.EXTEND_VOLUME, resource_uuid=volume.id, detail=message_field.Detail.DRIVER_FAILED_EXTEND) @mock.patch('cinder.compute.API') def _test_extend_volume_manager_successful(self, volume, nova_api): """Test volume can be extended at the manager level.""" def fake_extend(volume, new_size): volume['size'] = new_size nova_extend_volume = nova_api.return_value.extend_volume fake_reservations = ['RESERVATION'] orig_status = volume.status # Test driver success with mock.patch.object(self.volume.driver, 'extend_volume') as extend_volume: with mock.patch.object(QUOTAS, 'commit') as quotas_commit: extend_volume.return_value = fake_extend volume.status = 'extending' self.volume.extend_volume(self.context, volume, '4', fake_reservations) volume.refresh() self.assertEqual(4, volume.size) self.assertEqual(orig_status, volume.status) quotas_commit.assert_called_with( self.context, ['RESERVATION'], project_id=volume.project_id) if orig_status == 'in-use': instance_uuids = [ attachment.instance_uuid for attachment in volume.volume_attachment] nova_extend_volume.assert_called_with( self.context, instance_uuids, volume.id) def 
test_extend_volume_manager_available_fails_with_exception(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) self._test_extend_volume_manager_fails_with_exception(volume) self.volume.delete_volume(self.context, volume) def test_extend_volume_manager_available_successful(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) self._test_extend_volume_manager_successful(volume) self.volume.delete_volume(self.context, volume) def test_extend_volume_manager_in_use_fails_with_exception(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) instance_uuid = '12345678-1234-5678-1234-567812345678' attachment = db.volume_attach(self.context, {'volume_id': volume.id, 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment.id, instance_uuid, 'fake-host', 'vdb') volume.refresh() self._test_extend_volume_manager_fails_with_exception(volume) self.volume.detach_volume(self.context, volume.id, attachment.id) self.volume.delete_volume(self.context, volume) def test_extend_volume_manager_in_use_successful(self): volume = tests_utils.create_volume(self.context, size=2, status='creating', host=CONF.host) self.volume.create_volume(self.context, volume) instance_uuid = '12345678-1234-5678-1234-567812345678' attachment = db.volume_attach(self.context, {'volume_id': volume.id, 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment.id, instance_uuid, 'fake-host', 'vdb') volume.refresh() self._test_extend_volume_manager_successful(volume) self.volume.detach_volume(self.context, volume.id, attachment.id) self.volume.delete_volume(self.context, volume) @mock.patch('cinder.volume.rpcapi.VolumeAPI.extend_volume') def test_extend_volume_with_volume_type(self, mock_rpc_extend): elevated = context.get_admin_context() project_id = self.context.project_id db.volume_type_create(elevated, {'name': 'type', 'extra_specs': {}}) vol_type = db.volume_type_get_by_name(elevated, 'type') volume_api = cinder.volume.api.API() volume = volume_api.create(self.context, 100, 'name', 'description', volume_type=vol_type) try: usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') volumes_in_use = usage.in_use except exception.QuotaUsageNotFound: volumes_in_use = 0 self.assertEqual(100, volumes_in_use) db.volume_update(self.context, volume.id, {'status': 'available'}) volume_api._extend(self.context, volume, 200) mock_rpc_extend.called_once_with(self.context, volume, 200, mock.ANY) try: usage = db.quota_usage_get(elevated, project_id, 'gigabytes_type') volumes_reserved = usage.reserved except exception.QuotaUsageNotFound: volumes_reserved = 0 self.assertEqual(100, volumes_reserved) def test_create_volume_from_sourcevol(self): """Test volume can be created from a source volume.""" def fake_create_cloned_volume(volume, src_vref): pass self.mock_object(self.volume.driver, 'create_cloned_volume', fake_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume_src) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.volume.create_volume(self.context, volume_dst) volume_dst.refresh() self.assertEqual('available', volume_dst.status) self.volume.delete_volume(self.context, volume_dst) 
self.volume.delete_volume(self.context, volume_src) def test_create_volume_from_sourcevol_fail_bad_size(self): """Test cannot clone volume with bad volume size.""" volume_src = tests_utils.create_volume(self.context, size=3, status='available', host=CONF.host) self.assertRaises(exception.InvalidInput, self.volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src) @mock.patch('cinder.volume.api.API.list_availability_zones', return_value=({'name': 'nova', 'available': True}, {'name': 'az2', 'available': True})) def test_create_volume_from_sourcevol_fail_wrong_az(self, _mock_laz): """Test volume can't be cloned from an other volume in different az.""" volume_api = cinder.volume.api.API() volume_src = self._create_volume(self.context, availability_zone='az2', **self.volume_params) self.volume.create_volume(self.context, volume_src) volume_src = db.volume_get(self.context, volume_src['id']) volume_dst = volume_api.create(self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src, volume_type= objects.VolumeType.get_by_name_or_id( self.context, self.vol_type['id'])) self.assertEqual('az2', volume_dst['availability_zone']) self.assertRaises(exception.InvalidInput, volume_api.create, self.context, size=1, name='fake_name', description='fake_desc', source_volume=volume_src, availability_zone='nova') @mock.patch('cinder.image.image_utils.qemu_img_info') def test_create_volume_from_sourcevol_with_glance_metadata( self, mock_qemu_info): """Test glance metadata can be correctly copied to new volume.""" def fake_create_cloned_volume(volume, src_vref): pass self.mock_object(self.volume.driver, 'create_cloned_volume', fake_create_cloned_volume) image_info = imageutils.QemuImgInfo() image_info.virtual_size = '1073741824' mock_qemu_info.return_value = image_info volume_src = self._create_volume_from_image() self.volume.create_volume(self.context, volume_src) volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.volume.create_volume(self.context, volume_dst) self.assertEqual('available', db.volume_get(context.get_admin_context(), volume_dst['id']).status) # TODO: review all tests in this file to make sure they are # using the defined db.api to access stuff rather than taking # shortcuts like the following (see LP Bug #1860817): # src_glancemeta = db.volume_get(context.get_admin_context(), # volume_src['id']).volume_glance_metadata src_glancemeta = db.volume_glance_metadata_get( context.get_admin_context(), volume_src['id']) dst_glancemeta = db.volume_glance_metadata_get( context.get_admin_context(), volume_dst['id']) for meta_src in src_glancemeta: for meta_dst in dst_glancemeta: if meta_dst.key == meta_src.key: self.assertEqual(meta_src.value, meta_dst.value) self.volume.delete_volume(self.context, volume_src) self.volume.delete_volume(self.context, volume_dst) def test_create_volume_from_sourcevol_failed_clone(self): """Test src vol status will be restore by error handling code.""" def fake_error_create_cloned_volume(volume, src_vref): db.volume_update(self.context, src_vref['id'], {'status': 'error'}) raise exception.CinderException('fake exception') self.mock_object(self.volume.driver, 'create_cloned_volume', fake_error_create_cloned_volume) volume_src = tests_utils.create_volume(self.context, **self.volume_params) self.assertEqual('creating', volume_src.status) self.volume.create_volume(self.context, volume_src) self.assertEqual('available', volume_src.status) 
volume_dst = tests_utils.create_volume(self.context, source_volid=volume_src['id'], **self.volume_params) self.assertEqual('creating', volume_dst.status) self.assertRaises(exception.CinderException, self.volume.create_volume, self.context, volume_dst) # Source volume's status is still available and dst is set to error self.assertEqual('available', volume_src.status) self.assertEqual('error', volume_dst.status) self.volume.delete_volume(self.context, volume_dst) self.volume.delete_volume(self.context, volume_src) def test_clean_temporary_volume(self): def fake_delete_volume(ctxt, volume): volume.destroy() fake_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host, migration_status='migrating') fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) # 1. Only clean the db self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume, clean_db_only=True) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, fake_new_volume.id) # 2. Delete the backend storage fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) with mock.patch.object(volume_rpcapi.VolumeAPI, 'delete_volume') as \ mock_delete_volume: mock_delete_volume.side_effect = fake_delete_volume self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume, clean_db_only=False) self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, fake_new_volume.id) # Check when the migrated volume is not in migration fake_new_volume = tests_utils.create_volume(self.context, size=1, host=CONF.host) fake_volume.migration_status = 'non-migrating' fake_volume.save() self.volume._clean_temporary_volume(self.context, fake_volume, fake_new_volume) volume = db.volume_get(context.get_admin_context(), fake_new_volume.id) self.assertIsNone(volume.migration_status) def test_check_volume_filters_true(self): """Test bootable as filter for true""" volume_api = cinder.volume.api.API() filters = {'bootable': 'TRUE'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against True self.assertTrue(filters['bootable']) def test_check_volume_filters_false(self): """Test bootable as filter for false""" volume_api = cinder.volume.api.API() filters = {'bootable': 'false'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against False self.assertEqual(False, filters['bootable']) def test_check_volume_filters_invalid(self): """Test bootable as filter""" volume_api = cinder.volume.api.API() filters = {'bootable': 'invalid'} # To convert filter value to True or False volume_api.check_volume_filters(filters) # Confirming converted filter value against invalid value self.assertTrue(filters['bootable']) def test_update_volume_readonly_flag(self): """Test volume readonly flag can be updated at API level.""" # create a volume and assign to host volume = tests_utils.create_volume(self.context, admin_metadata={'readonly': 'True'}, **self.volume_params) self.volume.create_volume(self.context, volume) volume.status = 'in-use' def sort_func(obj): return obj['name'] volume_api = cinder.volume.api.API() # Update fails when status != available self.assertRaises(exception.InvalidVolume, volume_api.update_readonly_flag, self.context, volume, False) volume.status = 'available' # works when volume in 'available' status volume_api.update_readonly_flag(self.context, volume, False) volume.refresh() 
self.assertEqual('available', volume.status) admin_metadata = volume.volume_admin_metadata self.assertEqual(1, len(admin_metadata)) self.assertEqual('readonly', admin_metadata[0]['key']) self.assertEqual('False', admin_metadata[0]['value']) # clean up self.volume.delete_volume(self.context, volume) def test_secure_file_operations_enabled(self): """Test secure file operations setting for base driver. General, non network file system based drivers do not have anything to do with "secure_file_operations". This test verifies that calling the method always returns False. """ ret_flag = self.volume.driver.secure_file_operations_enabled() self.assertFalse(ret_flag) @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') def test_secure_file_operations_enabled_2(self, mock_secure): mock_secure.return_value = True vol = tests_utils.create_volume(self.context) result = self.volume.secure_file_operations_enabled(self.context, vol) mock_secure.assert_called_once_with() self.assertTrue(result) @mock.patch('cinder.volume.flows.common.make_pretty_name', new=mock.MagicMock()) @mock.patch('cinder.scheduler.rpcapi.SchedulerAPI.create_volume', return_value=None) @mock.patch('cinder.volume.flows.manager.create_volume.' 'CreateVolumeFromSpecTask.execute', side_effect=exception.DriverNotInitialized()) def test_create_volume_raise_rescheduled_exception(self, mock_execute, mock_reschedule): # Create source volume test_vol = tests_utils.create_volume(self.context, **self.volume_params) test_vol_id = test_vol['id'] self.assertRaises(exception.DriverNotInitialized, self.volume.create_volume, self.context, test_vol, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) self.assertTrue(mock_reschedule.called) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('creating', volume['status']) # We increase the stats on entering the create method, but we must # have cleared them on reschedule. self.assertEqual({'_pool0': {'allocated_capacity_gb': 0}}, self.volume.stats['pools']) @mock.patch('cinder.volume.flows.manager.create_volume.' 
'CreateVolumeFromSpecTask.execute') def test_create_volume_raise_unrescheduled_exception(self, mock_execute): # create source volume test_vol = tests_utils.create_volume(self.context, **self.volume_params) test_vol_id = test_vol['id'] mock_execute.side_effect = exception.VolumeNotFound( volume_id=test_vol_id) self.assertRaises(exception.VolumeNotFound, self.volume.create_volume, self.context, test_vol, {'volume_properties': self.volume_params}, {'retry': {'num_attempts': 1, 'host': []}}) volume = db.volume_get(context.get_admin_context(), test_vol_id) self.assertEqual('error', volume['status']) self.assertEqual({'_pool0': {'allocated_capacity_gb': 1}}, self.volume.stats['pools']) def test_cascade_delete_volume_with_snapshots(self): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() volume_api.delete(self.context, volume, cascade=True) def test_cascade_delete_volume_with_snapshots_error(self): """Test volume deletion with dependent snapshots.""" volume = tests_utils.create_volume(self.context, **self.volume_params) self.volume.create_volume(self.context, volume) snapshot = create_snapshot(volume['id'], size=volume['size']) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) snapshot.update({'status': fields.SnapshotStatus.CREATING}) snapshot.save() volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.context, volume, cascade=True) def test_cascade_force_delete_volume_with_snapshots_error(self): """Test volume force deletion with errored dependent snapshots.""" volume = tests_utils.create_volume(self.context, host='fakehost') snapshot = create_snapshot(volume.id, size=volume.size, status=fields.SnapshotStatus.ERROR_DELETING) self.volume.create_snapshot(self.context, snapshot) volume_api = cinder.volume.api.API() volume_api.delete(self.context, volume, cascade=True, force=True) snapshot = objects.Snapshot.get_by_id(self.context, snapshot.id) self.assertEqual('deleting', snapshot.status) volume = objects.Volume.get_by_id(self.context, volume.id) self.assertEqual('deleting', volume.status) def test_cascade_delete_volume_with_snapshots_in_other_project(self): """Test volume deletion with dependent snapshots in other project.""" volume = tests_utils.create_volume(self.user_context, **self.volume_params) snapshot = create_snapshot(volume['id'], size=volume['size'], project_id=fake.PROJECT2_ID) self.volume.create_snapshot(self.context, snapshot) self.assertEqual( snapshot.id, objects.Snapshot.get_by_id(self.context, snapshot.id).id) volume['status'] = 'available' volume['host'] = 'fakehost' volume_api = cinder.volume.api.API() self.assertRaises(exception.InvalidVolume, volume_api.delete, self.user_context, volume, cascade=True) @mock.patch.object(driver.BaseVD, 'get_backup_device') @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') def test_get_backup_device(self, mock_secure, mock_get_backup): vol = tests_utils.create_volume(self.context) backup = 
tests_utils.create_backup(self.context, vol['id']) mock_secure.return_value = False mock_get_backup.return_value = (vol, False) result = self.volume.get_backup_device(self.context, backup) mock_get_backup.assert_called_once_with(self.context, backup) mock_secure.assert_called_once_with() expected_result = {'backup_device': vol, 'secure_enabled': False, 'is_snapshot': False} self.assertEqual(expected_result, result) @mock.patch.object(driver.BaseVD, 'get_backup_device') @mock.patch.object(driver.BaseVD, 'secure_file_operations_enabled') def test_get_backup_device_want_objects(self, mock_secure, mock_get_backup): vol = tests_utils.create_volume(self.context) backup = tests_utils.create_backup(self.context, vol['id']) mock_secure.return_value = False mock_get_backup.return_value = (vol, False) result = self.volume.get_backup_device(self.context, backup, want_objects=True) mock_get_backup.assert_called_once_with(self.context, backup) mock_secure.assert_called_once_with() expected_result = objects.BackupDeviceInfo.from_primitive( {'backup_device': vol, 'secure_enabled': False, 'is_snapshot': False}, self.context) self.assertEqual(expected_result, result) @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'SUPPORTS_ACTIVE_ACTIVE', True) def test_set_resource_host_different(self): manager = vol_manager.VolumeManager(host='localhost-1@ceph', cluster='mycluster@ceph') volume = tests_utils.create_volume(self.user_context, host='localhost-2@ceph#ceph', cluster_name='mycluster@ceph') manager._set_resource_host(volume) volume.refresh() self.assertEqual('localhost-1@ceph#ceph', volume.host) @mock.patch('cinder.tests.fake_driver.FakeLoggingVolumeDriver.' 'SUPPORTS_ACTIVE_ACTIVE', True) def test_set_resource_host_equal(self): manager = vol_manager.VolumeManager(host='localhost-1@ceph', cluster='mycluster@ceph') volume = tests_utils.create_volume(self.user_context, host='localhost-1@ceph#ceph', cluster_name='mycluster@ceph') with mock.patch.object(volume, 'save') as save_mock: manager._set_resource_host(volume) save_mock.assert_not_called() def test_volume_attach_attaching(self): """Test volume_attach.""" instance_uuid = '12345678-1234-5678-1234-567812345678' volume = tests_utils.create_volume(self.context, **self.volume_params) attachment = db.volume_attach(self.context, {'volume_id': volume['id'], 'attached_host': 'fake-host'}) db.volume_attached(self.context, attachment['id'], instance_uuid, 'fake-host', 'vdb', mark_attached=False) volume_api = cinder.volume.api.API() volume = volume_api.get(self.context, volume['id']) self.assertEqual("attaching", volume['status']) self.assertEqual("attaching", volume['attach_status']) def test__append_volume_stats_with_pools(self): manager = vol_manager.VolumeManager() manager.stats = {'pools': {'pool1': {'allocated_capacity_gb': 20}, 'pool2': {'allocated_capacity_gb': 10}}} vol_stats = {'vendor_name': 'Open Source', 'pools': [ {'pool_name': 'pool1', 'provisioned_capacity_gb': 31}, {'pool_name': 'pool2', 'provisioned_capacity_gb': 21}]} manager._append_volume_stats(vol_stats) expected = {'vendor_name': 'Open Source', 'pools': [ {'pool_name': 'pool1', 'provisioned_capacity_gb': 31, 'allocated_capacity_gb': 20}, {'pool_name': 'pool2', 'provisioned_capacity_gb': 21, 'allocated_capacity_gb': 10}]} self.assertDictEqual(expected, vol_stats) def test__append_volume_stats_no_pools(self): manager = vol_manager.VolumeManager() manager.stats = {'pools': {'backend': {'allocated_capacity_gb': 20}}} vol_stats = {'provisioned_capacity_gb': 30} 
manager._append_volume_stats(vol_stats) expected = {'provisioned_capacity_gb': 30, 'allocated_capacity_gb': 20} self.assertDictEqual(expected, vol_stats) def test__append_volume_stats_no_pools_no_volumes(self): manager = vol_manager.VolumeManager() # This is what gets set on c-vol manager's init_host method manager.stats = {'pools': {}, 'allocated_capacity_gb': 0} vol_stats = {'provisioned_capacity_gb': 30} manager._append_volume_stats(vol_stats) expected = {'provisioned_capacity_gb': 30, 'allocated_capacity_gb': 0} self.assertDictEqual(expected, vol_stats) def test__append_volume_stats_driver_error(self): manager = vol_manager.VolumeManager() self.assertRaises(exception.ProgrammingError, manager._append_volume_stats, {'pools': 'bad_data'}) def test_default_tpool_size(self): self.skipTest("Bug 1811663") """Test we can set custom tpool size.""" eventlet.tpool._nthreads = 10 self.assertListEqual([], eventlet.tpool._threads) vol_manager.VolumeManager() self.assertEqual(20, eventlet.tpool._nthreads) self.assertListEqual([], eventlet.tpool._threads) def test_tpool_size(self): self.skipTest("Bug 1811663") """Test we can set custom tpool size.""" self.assertNotEqual(100, eventlet.tpool._nthreads) self.assertListEqual([], eventlet.tpool._threads) self.override_config('backend_native_threads_pool_size', 100, group='backend_defaults') vol_manager.VolumeManager() self.assertEqual(100, eventlet.tpool._nthreads) self.assertListEqual([], eventlet.tpool._threads) eventlet.tpool._nthreads = 20 class VolumeTestCaseLocks(base.BaseVolumeTestCase): MOCK_TOOZ = False def test_create_volume_from_volume_delete_lock_taken(self): # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) src_vol_id = src_vol['id'] # no lock self.volume.create_volume(self.context, src_vol) dst_vol = tests_utils.create_volume(self.context, source_volid=src_vol_id, **self.volume_params) orig_elevated = self.context.elevated gthreads = [] def mock_elevated(*args, **kwargs): # unset mock so it is only called once self.mock_object(self.context, 'elevated', orig_elevated) # we expect this to block and then fail t = eventlet.spawn(self.volume.create_volume, self.context, volume=dst_vol, request_spec={'source_volid': src_vol_id}) gthreads.append(t) return orig_elevated(*args, **kwargs) # mock something from early on in the delete operation and within the # lock so that when we do the create we expect it to block. self.mock_object(self.context, 'elevated', mock_elevated) # locked self.volume.delete_volume(self.context, src_vol) # we expect the volume create to fail with the following err since the # source volume was deleted while the create was locked. Note that the # volume is still in the db since it was created by the test prior to # calling manager.create_volume. with mock.patch('sys.stderr', new=six.StringIO()): self.assertRaises(exception.VolumeNotFound, gthreads[0].wait) def test_create_volume_from_snapshot_delete_lock_taken(self): # create source volume src_vol = tests_utils.create_volume(self.context, **self.volume_params) # no lock self.volume.create_volume(self.context, src_vol) # create snapshot snap_id = create_snapshot(src_vol.id, size=src_vol['size'])['id'] snapshot_obj = objects.Snapshot.get_by_id(self.context, snap_id) # no lock self.volume.create_snapshot(self.context, snapshot_obj) # create vol from snapshot... 
dst_vol = tests_utils.create_volume(self.context, snapshot_id=snap_id, source_volid=src_vol.id, **self.volume_params) orig_elevated = self.context.elevated gthreads = [] def mock_elevated(*args, **kwargs): # unset mock so it is only called once self.mock_object(self.context, 'elevated', orig_elevated) # We expect this to block and then fail t = eventlet.spawn(self.volume.create_volume, self.context, volume=dst_vol, request_spec={'snapshot_id': snap_id}) gthreads.append(t) return orig_elevated(*args, **kwargs) # mock something from early on in the delete operation and within the # lock so that when we do the create we expect it to block. self.mock_object(self.context, 'elevated', mock_elevated) # locked self.volume.delete_snapshot(self.context, snapshot_obj) # we expect the volume create to fail with the following err since the # snapshot was deleted while the create was locked. Note that the # volume is still in the db since it was created by the test prior to # calling manager.create_volume. with mock.patch('sys.stderr', new=six.StringIO()): self.assertRaises(exception.SnapshotNotFound, gthreads[0].wait) # locked self.volume.delete_volume(self.context, src_vol) # make sure it is gone self.assertRaises(exception.VolumeNotFound, db.volume_get, self.context, src_vol.id)
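# A standalone sketch of the greenthread pattern used by the two lock tests
# above: an exception raised inside an eventlet greenthread is re-raised by
# wait(), which is why assertRaises(..., gthreads[0].wait) works there. The
# failing_task function below is purely illustrative.
import eventlet


def failing_task():
    raise ValueError("source was deleted while the create was blocked")


gt = eventlet.spawn(failing_task)
try:
    gt.wait()  # re-raises the ValueError from inside the greenthread
except ValueError as exc:
    print("caught:", exc)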
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from sklearn import metrics
import tensorflow as tf
from tensorflow.contrib import learn  # from SKFLOW

### Download and load MNIST data.
mnist = learn.datasets.load_dataset('mnist')


### Convolutional network
def max_pool_2x2(tensor_in):
    return tf.nn.max_pool(
        tensor_in, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def conv_model(X, y):  # pylint: disable=invalid-name,missing-docstring
    # reshape X to a 4d tensor, with the 2nd and 3rd dimensions being image
    # width and height and the final dimension being the number of color
    # channels.
    X = tf.reshape(X, [-1, 28, 28, 1])
    # first conv layer will compute 32 features for each 5x5 patch
    with tf.variable_scope('conv_layer1'):
        h_conv1 = learn.ops.conv2d(X, n_filters=32, filter_shape=[5, 5],
                                   bias=True, activation=tf.nn.relu)
        h_pool1 = max_pool_2x2(h_conv1)
    # second conv layer will compute 64 features for each 5x5 patch.
    with tf.variable_scope('conv_layer2'):
        h_conv2 = learn.ops.conv2d(h_pool1, n_filters=64, filter_shape=[5, 5],
                                   bias=True, activation=tf.nn.relu)
        h_pool2 = max_pool_2x2(h_conv2)
    # reshape tensor into a batch of vectors
    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    # densely connected layer with 1024 neurons.
    h_fc1 = learn.ops.dnn(
        h_pool2_flat, [1024], activation=tf.nn.relu, dropout=0.5)
    return learn.models.logistic_regression(h_fc1, y)


# Training and predicting.
classifier = learn.TensorFlowEstimator(
    model_fn=conv_model, n_classes=10, batch_size=100, steps=20000,
    learning_rate=0.001)
classifier.fit(mnist.train.images, mnist.train.labels)
score = metrics.accuracy_score(
    mnist.test.labels, classifier.predict(mnist.test.images))
print('Accuracy: {0:f}'.format(score))
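# A small sanity check, not part of the example above: it derives the
# 7 * 7 * 64 reshape used in conv_model. Two 2x2/stride-2 max-pool layers
# with 'SAME' padding halve each spatial dimension of the 28x28 input twice.
def pooled_side(side, n_pools, stride=2):
    for _ in range(n_pools):
        side = (side + stride - 1) // stride  # 'SAME' padding gives ceil(side / stride)
    return side

assert pooled_side(28, 2) == 7  # hence h_pool2 is flattened to 7 * 7 * 64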
import numpy as np from traffic_simulator import TrafficSim if __name__ == "__main__": env = TrafficSim(["./ngsim"]) obs = env.reset() act = np.random.normal(0, 1, size=(2,)) obs, rew, done, info = env.step(act) print("finished") env.close()
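# A minimal sketch of a multi-step rollout against the same TrafficSim API
# (reset/step/close) exercised above; the 1000-step cap and the random
# policy are illustrative assumptions, not part of the simulator.
import numpy as np

from traffic_simulator import TrafficSim

if __name__ == "__main__":
    env = TrafficSim(["./ngsim"])
    obs = env.reset()
    total_reward, done, steps = 0.0, False, 0
    while not done and steps < 1000:
        act = np.random.normal(0, 1, size=(2,))  # same 2-D action shape as above
        obs, rew, done, info = env.step(act)
        total_reward += rew
        steps += 1
    env.close()
    print("episode return:", total_reward)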
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. from aliyunsdkcore.request import RpcRequest from aliyunsdkmts.endpoint import endpoint_data class UpdateMCTemplateRequest(RpcRequest): def __init__(self): RpcRequest.__init__(self, 'Mts', '2014-06-18', 'UpdateMCTemplate') if hasattr(self, "endpoint_map"): setattr(self, "endpoint_map", endpoint_data.getEndpointMap()) if hasattr(self, "endpoint_regional"): setattr(self, "endpoint_regional", endpoint_data.getEndpointRegional()) def get_Politics(self): return self.get_query_params().get('Politics') def set_Politics(self,Politics): self.add_query_param('Politics',Politics) def get_ResourceOwnerId(self): return self.get_query_params().get('ResourceOwnerId') def set_ResourceOwnerId(self,ResourceOwnerId): self.add_query_param('ResourceOwnerId',ResourceOwnerId) def get_Abuse(self): return self.get_query_params().get('Abuse') def set_Abuse(self,Abuse): self.add_query_param('Abuse',Abuse) def get_Qrcode(self): return self.get_query_params().get('Qrcode') def set_Qrcode(self,Qrcode): self.add_query_param('Qrcode',Qrcode) def get_Porn(self): return self.get_query_params().get('Porn') def set_Porn(self,Porn): self.add_query_param('Porn',Porn) def get_Terrorism(self): return self.get_query_params().get('Terrorism') def set_Terrorism(self,Terrorism): self.add_query_param('Terrorism',Terrorism) def get_Logo(self): return self.get_query_params().get('Logo') def set_Logo(self,Logo): self.add_query_param('Logo',Logo) def get_Live(self): return self.get_query_params().get('Live') def set_Live(self,Live): self.add_query_param('Live',Live) def get_Contraband(self): return self.get_query_params().get('Contraband') def set_Contraband(self,Contraband): self.add_query_param('Contraband',Contraband) def get_Ad(self): return self.get_query_params().get('Ad') def set_Ad(self,Ad): self.add_query_param('Ad',Ad) def get_ResourceOwnerAccount(self): return self.get_query_params().get('ResourceOwnerAccount') def set_ResourceOwnerAccount(self,ResourceOwnerAccount): self.add_query_param('ResourceOwnerAccount',ResourceOwnerAccount) def get_OwnerAccount(self): return self.get_query_params().get('OwnerAccount') def set_OwnerAccount(self,OwnerAccount): self.add_query_param('OwnerAccount',OwnerAccount) def get_OwnerId(self): return self.get_query_params().get('OwnerId') def set_OwnerId(self,OwnerId): self.add_query_param('OwnerId',OwnerId) def get_TemplateId(self): return self.get_query_params().get('TemplateId') def set_TemplateId(self,TemplateId): self.add_query_param('TemplateId',TemplateId) def get_Name(self): return self.get_query_params().get('Name') def set_Name(self,Name): self.add_query_param('Name',Name) def get_spam(self): return self.get_query_params().get('spam') def set_spam(self,spam): self.add_query_param('spam',spam)
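# A minimal usage sketch for the request class above, assuming the
# aliyunsdkcore client is available; the credentials, region and parameter
# payloads are placeholders.
from aliyunsdkcore.client import AcsClient

client = AcsClient('<access-key-id>', '<access-key-secret>', 'cn-hangzhou')

request = UpdateMCTemplateRequest()
request.set_TemplateId('<template-id>')
request.set_Name('my-review-template')
request.set_Porn('{"seed": 50}')  # illustrative scene configuration payload

response = client.do_action_with_exception(request)
print(response)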
from os import path

from ga4stpg.condition import BestKnownReached, Stagnation
from ga4stpg.customevol import GeneticEvolution as Evolution
from ga4stpg.customevol import GeneticPopulation as GPopulation
from ga4stpg.graph import ReaderORLibrary
from ga4stpg.graph.util import is_steiner_tree
from ga4stpg.normalization import normalize
from ga4stpg.selector import roullete
from ga4stpg.tracker import DataTracker
from ga4stpg.tree.evaluation import EvaluateTreeGraph
from ga4stpg.tree.generate import GenerateBasedPrimRST
from ga4stpg.tree.mstcrossover import CrossoverPrimRST
from ga4stpg.tree.mutate import (PrimBasedMutation, Prunning,
                                 ReplaceByRandomEdge)
from ga4stpg.util import STEIN_B, display, update_best, update_generation


def simulation(simulation_name, params):
    datasets_folder = path.join("datasets", "ORLibrary")
    filename = path.join(datasets_folder, params["dataset"])
    STPG = ReaderORLibrary().parser(filename)

    print("STPG information", '\n', 10*'- ', '\n')
    print("Trial: ", params['runtrial'])  # read from the passed-in params, not the module-level dict
    print('Instance: ', STPG.name)
    print('Best Known cost: ', params['global_optimum'])
    print("Nro. Nodes:", STPG.nro_nodes)
    print("Nro. Edges:", STPG.nro_edges)
    print("Nro. Terminals:", STPG.nro_terminals)
    # print("Terminals: \n", STPG.terminals)

    output_folder = path.join("data", simulation_name, STPG.name)
    tracker = DataTracker(params['runtrial'], target=output_folder)

    generator = GenerateBasedPrimRST(STPG)
    evaluator = EvaluateTreeGraph(STPG)
    crossover = CrossoverPrimRST(STPG)
    prunner = Prunning(STPG)
    mut_prim = PrimBasedMutation(STPG)
    replace_random = ReplaceByRandomEdge(STPG)

    population = (GPopulation(
        chromosomes=[generator() for _ in range(params["population_size"])],
        eval_function=evaluator,
        maximize=True)
        .mutate(mutate_function=prunner, probability=1.0)
        .evaluate()
        .normalize(norm_function=normalize)
        .callback(update_best))

    evol = (Evolution()
            .evaluate()
            .normalize(norm_function=normalize)
            .callback(update_best)
            .callback(tracker.log_evaluation)
            .select(selection_func=roullete)
            .crossover(combiner=crossover)
            .mutate(mutate_function=replace_random, probability=0.3)
            .mutate(mutate_function=mut_prim, probability=0.3)
            .mutate(mutate_function=prunner, probability=0.5)
            .callback(update_generation)
            .callback(display, every=100))

    with Stagnation(interval=params["stagnation_interval"]), \
            BestKnownReached(global_optimum=params['global_optimum']):
        result = population.evolve(evol, n=params["n_iterations"])

    tracker.log_simulation(params, STPG, result)

    best_overall = result.documented_best
    test, response = is_steiner_tree(best_overall.chromosome, STPG)
    tracker.log_bestIndividual(best_overall, test, response)

    tracker.report()


if __name__ == "__main__":
    parameters = {
        'runtrial': 0,
        'dataset': 'steinb1.txt',
        'global_optimum': 82,
        'population_size': 100,
        'tx_mutation': 0.3,
        'tx_crossover': 1.0,
        'n_iterations': 4_000,
        'stagnation_interval': 500,
    }

    for dataset, value in STEIN_B:
        print('='*10, '\n', dataset)
        print('global optimum ', value)
        print('='*10, '\n')
        parameters['dataset'] = dataset
        parameters['global_optimum'] = value
        for i in range(50):
            parameters['runtrial'] = i + 1
            simulation("S5PrimRST_pruner10Mst", parameters)
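# A short, self-contained sketch that exercises the building blocks used in
# simulation() on a single instance: generate one Prim-based random tree and
# evaluate it. Only classes already imported by the script above are used;
# treating the evaluator's return value as directly printable is an assumption.
from os import path

from ga4stpg.graph import ReaderORLibrary
from ga4stpg.tree.evaluation import EvaluateTreeGraph
from ga4stpg.tree.generate import GenerateBasedPrimRST

stpg = ReaderORLibrary().parser(path.join("datasets", "ORLibrary", "steinb1.txt"))
generate = GenerateBasedPrimRST(stpg)
evaluate = EvaluateTreeGraph(stpg)

tree = generate()      # one random candidate Steiner tree
print(evaluate(tree))  # its evaluation, as used for fitness above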
import socket
import sys
import struct

# Errors
UDP_TIMEOUT = "UDP timeout"
TCP_TIMEOUT = "TCP timeout"
PING_TIMEOUT = "PING Timeout"
ICMP_TIMEOUT = "ICMP Timeout"
PORT_ERROR = "Port unreachable"
UDP_ERROR = "UDP packet failed"
HOP_ERROR = "No answer"
DEST_UNREACHABLE_MSG = "Dest unreach"

# Trace constants
PORT = 443
MAX_HOPS_FORWARD = 30  # Trace stops at 30 hops
MAX_TIMEOUT = 3        # Trace stops after 3 timeouts
TIMEOUT = 0.5          # seconds before timeout
K = 1
ZOO_TRACE_PORT = 20000
gns3_zoo_ip = "130.79.90.202"

# Indicators (MPLS tunnel revelation)
NO_INDICATOR = 255
OPAQUE = 2
DUP_IP = 3
RTL = 4
FRPLA = 5
ERROR = -1
FRPLA_THRESHOLD = 3
RTLA_THRESHOLD = 3

# Types & IP packet constants
TIME_EXCEEDED = 11
PORT_UNREACHABLE = 3
DEST_UNREACHABLE = 3
ECHO_REPLY = 0
ECHO_REQUEST = 8
MPLS_THRESHOLD = 150
UDP_ID = "udp"
TCP_ID = "tcp"
ICMP_ID = "icmp"

gns3_addresses = ["192.168.3.1", "192.168.3.2", "192.168.5.1",
                  "130.79.91.1", "130.79.91.2",
                  "130.79.90.105", "130.79.90.106", "130.79.90.2",
                  "130.79.90.1",
                  "192.168.8.2", "10.5.0.1", "10.1.0.1",
                  "10.1.0.2", "10.6.0.1", "10.2.0.1", "10.2.0.2",
                  "10.7.0.1", "10.3.0.1", "10.3.0.2", "10.8.0.1",
                  "10.4.0.1", "10.4.0.2", "10.11.0.1", "10.9.0.1",
                  "10.11.0.2", "10.12.0.1", "10.9.0.1",
                  "192.168.2.1", "192.168.2.2", "192.168.6.1",
                  "192.168.4.1", "192.168.4.2", "192.168.7.1",
                  "130.79.91.105", "200.168.8.1", "200.168.8.2"]
# NOTE: the assignment below overwrites the list above with an empty one,
# effectively disabling the GNS3 address set.
gns3_addresses = []
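# A minimal sketch of a single TTL-limited UDP probe built on the constants
# above, in the spirit of the traceroute they support. Raw ICMP sockets
# require root privileges; reply parsing is deliberately simplified and the
# function name is illustrative.
import socket


def send_probe(dest_ip, ttl, port=PORT, timeout=TIMEOUT):
    sender = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sender.setsockopt(socket.IPPROTO_IP, socket.IP_TTL, ttl)
    receiver = socket.socket(socket.AF_INET, socket.SOCK_RAW,
                             socket.getprotobyname(ICMP_ID))
    receiver.settimeout(timeout)
    try:
        sender.sendto(b"", (dest_ip, port))
        packet, (hop_ip, _) = receiver.recvfrom(512)
        icmp_type = packet[20]  # ICMP type follows the 20-byte IP header
        # TIME_EXCEEDED (11) from routers en route, DEST_UNREACHABLE (3) at the target
        return hop_ip, icmp_type
    except socket.timeout:
        return None, UDP_TIMEOUT
    finally:
        sender.close()
        receiver.close()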
import importlib
from collections import OrderedDict

from django.conf import settings


class ProviderRegistry(object):
    def __init__(self):
        self.provider_map = OrderedDict()
        self.loaded = False

    def get_list(self, request=None):
        self.load()
        return [provider_cls(request)
                for provider_cls in self.provider_map.values()]

    def register(self, cls):
        self.provider_map[cls.id] = cls

    def by_id(self, id, request=None):
        self.load()
        return self.provider_map[id](request=request)

    def as_choices(self):
        self.load()
        for provider_cls in self.provider_map.values():
            yield (provider_cls.id, provider_cls.name)

    def load(self):
        # TODO: Providers register with the provider registry when
        # loaded. Here, we build the URLs for all registered providers. So, we
        # really need to be sure all providers did register, which is why we're
        # forcefully importing the `provider` modules here. The overall
        # mechanism is way too magical and depends on the import order et al,
        # so all of this really needs to be revisited.
        if not self.loaded:
            for app in settings.INSTALLED_APPS:
                try:
                    provider_module = importlib.import_module(app + ".provider")
                except ImportError:
                    pass
                else:
                    for cls in getattr(provider_module, "provider_classes", []):
                        self.register(cls)
            self.loaded = True


registry = ProviderRegistry()
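# A minimal usage sketch for the registry defined above, assuming Django
# settings are configured and at least one app in INSTALLED_APPS ships a
# `provider` module exposing `provider_classes`. The 'discord' id is
# illustrative.
registry.load()
for provider_id, provider_name in registry.as_choices():
    print(provider_id, provider_name)

provider = registry.by_id('discord')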
import simpy

from components.base.bus.abst_bus_can import AbstractCANBus
import config.timing_registration as time
from tools.general import General as G, RefList, General
from config import project_registration as proj, can_registration
from tools.ecu_logging import ECULogger as L, ECULogger
from io_processing.surveillance_handler import MonitorInput, MonitorTags
import uuid
from uuid import UUID


class RapidCANBus(AbstractCANBus):
    ''' This class implements a CAN Bus that actively pulls
        messages from the ECUs' buffers '''

    def __init__(self, sim_env, bus_id, data_rate, avg_ecu_dist=2):
        ''' Constructor

            Input:  sim_env       simpy.Environment   environment in which this Bus acts
                    bus_id        string              id of this Bus object
                    data_rate     float               datarate of this bus
                    avg_ecu_dist  float               average distance between two connected ECUs
            Output: -
        '''
        AbstractCANBus.__init__(self, sim_env, bus_id, data_rate, avg_ecu_dist)

        # bus objects
        self.current_message = None  # current message on the bus [sender_ecu, message]
        self.set_settings()
        self.monitor_list = RefList()
        self._used_prop_times = {}
        self.gateways = []
        self.first = True

        # synchronization objects
        self.pot_messages = []  # gathers all potential messages that want to be sent at a certain point in time
        self.sync_1 = simpy.Store(self.sim_env, capacity=1)  # once the decision who is allowed to send is made, this synchronizer starts the transmission
        self.sync_2 = simpy.Store(self.sim_env, capacity=1)  # if this store is empty then the channel is busy
        self.subscribers = 0  # number of ECUs waiting for the channel to be freed
        self.current_message_length_bit = 0

        # project parameters
        self.SCB_GATHER_MSGS = time.SCB_GATHER_MSGS
        self.SCB_GRAB_PRIO_MSG = time.SCB_GRAB_PRIO_MSG
        self.SCB_PROPAGATION_DELAY = time.SCB_PROPAGATION_DELAY
        self.SCB_SENDING_TIME = time.SCB_SENDING_TIME
        self.SCB_WRITE_TO_TRANSCEIVER_BUFFER = time.SCB_WRITE_TO_TRANSCEIVER_BUFFER

        # ECUs' datalink layers willing to send
        self._willing_dll = []

    def monitor_update(self):
        ''' returns the input for the monitor

            Input:   -
            Output:  monitor_list    list    List of MonitorInput objects
        '''
        self.monitor_list.clear_on_access()  # on the next access the list will be cleared
        return self.monitor_list.get()

    def release_willing(self, dll):
        ''' removes an ECU that is not willing to send anymore

            Input:   dll    AbstractDataLinkLayer    Datalink layer of the ECU that is not willing to send anymore
            Output:  -
        '''
        self._willing_dll.remove(dll)

    def add_willing(self, dll):
        ''' adds the datalink layer of an ECU that is willing to send

            Input:   dll    AbstractDataLinkLayer    Datalink layer of the ECU that is willing to send
            Output:  -
        '''
        if dll not in self._willing_dll:
            self._willing_dll.append(dll)

    def notify_bus(self):
        ''' When the bus is empty it is set to a sleep mode that waits until an
            ECU notifies it. Using this method the ECU does so. Thus the bus will
            only be active when some ECU sends.

            Input:   -
            Output:  -
        '''
        if self.current_message != None:
            return
        else:
            self.sync_1.put(True)

    def process(self):
        ''' Constantly pulls messages from all ECUs that are connected. Once the
            bus is done with one message it pulls the next message from the
            connected ECUs.
            Input:   -
            Output:  -
        '''
        stp = 0
        while True:

            # print time
            t = self.sim_env.now
            if t > stp:
                # print(self.sim_env.now)
                stp += 0.5

            # check which ECU sends: the lowest message identifier wins
            # arbitration. Track the minimum identifier and the index of the
            # datalink layer holding it separately.
            min_identifier = float("inf")
            min_index = None
            for index, dll in enumerate(self._willing_dll):
                val = dll.first_queue_identifier()
                if val is not False and val < min_identifier:
                    min_identifier = val
                    min_index = index

            # no ECU willing to send: wait for notify
            if min_index is None:
                yield self.sync_1.get()
            else:
                self.current_message = self._willing_dll[min_index].controller.transmit_buffer.get().message
                self.current_message_length_bit = self.current_message.msg_length_in_bit

                # transmit message
                if self.current_message != None:
                    # monitor start
                    monitor_note = self._monitor_transmission_start()

                    # write to buffer
                    yield self.sim_env.process(self._wait_transmission_time_and_buffer())
                    self._reset_transmission()

                    # monitor end
                    self._monitor_transmission_end(monitor_note)

                # if the ECU's buffer is now empty remove it from the willing list
                if len(self._willing_dll[min_index].controller.transmit_buffer.queue) == 0:
                    self.release_willing(self._willing_dll[min_index])

    def set_settings(self):
        ''' sets the initial setting association between the settings variables
            and the actual parameter

            Input:   -
            Output:  -
        '''
        self.settings = {}

        # parameter
        self.settings['t_gather_msg'] = 'SCB_GATHER_MSGS'
        self.settings['t_grab_prio_msg'] = 'SCB_GRAB_PRIO_MSG'
        self.settings['t_propagation_delay'] = 'SCB_PROPAGATION_DELAY'
        self.settings['t_sending_time'] = 'SCB_SENDING_TIME'
        self.settings['t_write_to_transceiver_buffer'] = 'SCB_WRITE_TO_TRANSCEIVER_BUFFER'

    def wait_until_free(self):
        ''' when the channel is busy some ECUs can start this method in a simpy
            process. Once the channel is free this process ends and the next ECU
            can start its transmission.
            Technically: counts the number of waiting processes and notifies them
            all once the channel is free

            Input:   -
            Output:  -
        '''
        # add subscriber
        self.subscribers += 1
        yield self.sync_2.get()

        # release all receivers
        while self.subscribers > 1:
            self.sync_2.put(True)
            self.subscribers -= 1
        self.subscribers = 0

    def _extract_transmission_times(self):
        ''' calculates the time the current transmission takes

            Input:   -
            Output:  t_propagation   float   time it takes to propagate the message
                     t_sending       float   time it takes to send the message
        '''
        t_propagation = time.call(self.SCB_PROPAGATION_DELAY, self.avg_dist_between_ecus)  # either constant or calculated depending on config
        t_sending = time.call(self.SCB_SENDING_TIME, self.current_message_length_bit, proj.BUS_ECU_DATARATE)  # either constant or calculated depending on config
        return t_propagation, t_sending

    def _gateway_sends(self, ecu):
        ''' if the gateway is the sender let it continue and reset the message state

            Input:   ecu     AbstractECU    current ECU that sends the message
            Output:  bool    boolean        True if the message was sent by this ECU
        '''
        try:
            if ecu.ecu_id in self.current_message.gw_id:
                # sending the message back and forth could lead to errors: need to reset the gw_id list
                return True
        except:
            return False
        return False

    def _get_highest_priority_msg(self, message_list):
        ''' returns the message with the highest priority

            Input:   message_list   list      list of messages
            Output:  message        object    message with highest priority (lowest message id)
        '''
        min_val = float("inf")
        message = None
        for cur_message in message_list:
            if min_val > cur_message.message_identifier:
                min_val = cur_message.message_identifier
                message = cur_message
        return message

    def _grab_highest_priority(self):
        ''' notes the time it takes to select the message with the highest priority

            Input:   -
Output: - ''' if self.SCB_GRAB_PRIO_MSG != 0: G().to_t(self.sim_env, self.SCB_GRAB_PRIO_MSG, 'SCB_GRAB_PRIO_MSG', self.__class__.__name__, self) return True return False def _monitor_transmission_start(self): ''' notes the start time when this message was put on the bus Input: - Output: - ''' # extract information uid = uuid.uuid4() tag = MonitorTags.CB_PROCESSING_MESSAGE c_id = self.comp_id sender_id = self.current_message.sender_id msg_id = self.current_message.message_identifier msg_uid = self.current_message.data.unique_id data = self.current_message.data.get(); # extract further information msg = self.current_message size = self.current_message_length_bit / 8 self.current_message.data.unique_id = msg_uid # send to monitor G().mon(self.monitor_list, MonitorInput(data, tag, c_id, self.sim_env.now, sender_id, msg_id, msg, size, msg_id, uid.hex)) return data, c_id, sender_id, msg_id, msg, size, uid def _monitor_transmission_end(self, mon_out): ''' notes the end time when this message was put on the bus Input: - Output: - ''' G().mon(self.monitor_list, MonitorInput(mon_out[0], MonitorTags.CB_DONE_PROCESSING_MESSAGE, \ mon_out[1], self.sim_env.now, mon_out[2], mon_out[3], \ mon_out[4], mon_out[5], -1, mon_out[6].hex)) def _push_to_receivers(self): ''' writes the current message to all ecus that are connected to this Bus Input: - Output: - ''' # get gateways if self.first: self.gateways = [itm.ecu_id for itm in self.connected_ecus if isinstance(itm.ecu_id, UUID)] self.first = False # send only to receivers if General().send_only_to_receivers and self.current_message.message_identifier not in can_registration.AUTH_MESSAGES: run_list = General().sender_receiver_map[self.current_message.sender_id][self.current_message.message_identifier] + self.gateways for ecu in self.connected_ecus: if ecu.ecu_id not in run_list: continue # Gateway:avoid sending to itself (loops) if self._gateway_sends(ecu): continue # ECU: avoid sending to itself if(ecu.ecu_id != self.current_message.sender_id): self.current_message.current_bus = self.comp_id ecu.ecuHW.transceiver.get(self.current_message) else: # iterate over receivers for ecu in self.connected_ecus: # Gateway:avoid sending to itself (loops) if self._gateway_sends(ecu): continue # ECU: avoid sending to itself if(ecu.ecu_id != self.current_message.sender_id): self.current_message.current_bus = self.comp_id ecu.ecuHW.transceiver.get(self.current_message) def _reset_transmission(self): ''' after one message was sent three things have to be reset the current message. 
The synchronizer for the selection of the next higher prioritized message to be sent and the list that gathered the selected potential messages Input: - Output: - ''' self.current_message = None # message is not on the line anymore self.sync_2.put(True) # channel is free again self.pot_messages = [] # reset def _sending_ok(self, t_propagation, t_sending): ''' checks if this message is sendable Input: t_propagation: float time it takes to propagate the message t_sending float time it takes to send the message Output: bool boolean true if the time is valid ''' try: G().to_t(self.sim_env, t_propagation + t_sending + self.SCB_WRITE_TO_TRANSCEIVER_BUFFER, 'SCB_PROPAGATION_DELAY+SCB_SENDING_TIME+SCB_WRITE_TO_TRANSCEIVER_BUFFER', self.__class__.__name__, self) if t_propagation + t_sending + self.SCB_WRITE_TO_TRANSCEIVER_BUFFER > 0: return True # logging.error("Error (skipped with time 0):t_propagation =%s, t_sending = %s, self.SCB_WRITE_TO_TRANSCEIVER_BUFFER = %s // msg_length_bit = %s" % \ # (t_propagation, t_sending, self.SCB_WRITE_TO_TRANSCEIVER_BUFFER, self.current_message_length_bit)) except: return False return False def _try_logging_transmission(self, t_propagation, t_sending): ''' notes the times that it takes to send the messages. In case an erroneous message is sent this method logs the exception Input: t_propagation: float time it takes to propagate the message t_sending float time it takes to send the message ''' try: # Log transmission L().log(300, self.sim_env.now, self.current_message.sender_id, float(self.current_message_length_bit) / 8.0, \ self.current_message_length_bit, self.comp_id, self.current_message.data.get(), t_propagation + t_sending) except: # Log traceback ECULogger().log_traceback() try: L().log(300, self.sim_env.now, self.current_message.sender_id, self.current_message.data, \ self.current_message_length_bit, self.comp_id, self.current_message.data, t_propagation + t_sending) except: pass def _wait_transmission_time_and_buffer(self): ''' this method times out for the duration of the transmission and then writes the sent messages to the receiving buffer of the ecu Input: - Output: - ''' if not self.current_message_length_bit in self._used_prop_times: # sending times t_propagation, t_sending = self._extract_transmission_times() # duration of transmission wait_time = t_propagation + t_sending + self.SCB_WRITE_TO_TRANSCEIVER_BUFFER if wait_time <= 0: wait_time = 0.000001 self._used_prop_times[self.current_message_length_bit] = wait_time else: wait_time = self._used_prop_times[self.current_message_length_bit] yield self.sim_env.timeout(wait_time) # put to connected ecus self._push_to_receivers()
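# ---------------------------------------------------------------------------
# Hedged illustration (not part of the simulator): the selection rule used in
# RapidCANBus.process() is plain CAN priority arbitration -- among all nodes
# willing to send, the frame with the LOWEST identifier wins the bus. The toy
# queues below are assumptions for illustration; the real class reads the
# identifiers from each datalink layer's transmit buffer.
# ---------------------------------------------------------------------------
def arbitrate(willing):
    """Return the index of the queue whose head frame wins arbitration.

    `willing` is a list of non-empty lists of (identifier, payload) tuples;
    returns None when nobody is willing to send.
    """
    min_identifier = float("inf")
    min_index = None
    for index, queue in enumerate(willing):
        identifier = queue[0][0]
        if identifier < min_identifier:
            min_identifier = identifier
            min_index = index
    return min_index


if __name__ == "__main__":
    queues = [[(0x120, b"engine")], [(0x0A0, b"brakes")], [(0x300, b"radio")]]
    assert arbitrate(queues) == 1  # 0x0A0 (brakes) is the lowest id -> wins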
from treq.client import HTTPClient from _utils import print_response from twisted.internet.task import react from twisted.web.client import Agent def make_custom_agent(reactor): return Agent(reactor, connectTimeout=42) def main(reactor, *args): agent = make_custom_agent(reactor) http_client = HTTPClient(agent) d = http_client.get( 'https://secure.example.net/area51', auth=('admin', "you'll never guess!")) d.addCallback(print_response) return d react(main, [])
from eth_utils import to_set from eth import constants from eth.utils.address import ( force_bytes_to_address, ) THREE = force_bytes_to_address(b'\x03') @to_set def collect_touched_accounts(computation): """ Collect all of the accounts that *may* need to be deleted based on EIP161: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-161.md also see: https://github.com/ethereum/EIPs/issues/716 """ if computation.is_origin_computation and computation.transaction_context.gas_price == 0: yield computation.state.coinbase for beneficiary in sorted(set(computation.accounts_to_delete.values())): if computation.is_error and computation.is_origin_computation: # Special case to account for geth+parity bug # https://github.com/ethereum/EIPs/issues/716 if beneficiary == THREE: yield beneficiary continue else: yield beneficiary if computation.msg.to != constants.CREATE_CONTRACT_ADDRESS: if computation.is_error and computation.is_origin_computation: # Special case to account for geth+parity bug # https://github.com/ethereum/EIPs/issues/716 if computation.msg.to == THREE: yield computation.msg.to else: yield computation.msg.to if not computation.is_origin_computation or not computation.is_error: for child in computation.children: yield from collect_touched_accounts(child)
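# ---------------------------------------------------------------------------
# Hedged sketch (not part of py-evm): minimal stand-in objects showing how
# collect_touched_accounts() walks a computation tree. Everything built with
# SimpleNamespace below is a hypothetical stub that only mimics the
# attributes the function reads; real computations come from the EVM itself.
# ---------------------------------------------------------------------------
from types import SimpleNamespace


def _stub_computation(to, accounts_to_delete=None, children=None,
                      is_error=False, is_origin=False, gas_price=1):
    return SimpleNamespace(
        msg=SimpleNamespace(to=to),
        accounts_to_delete=accounts_to_delete or {},
        children=children or [],
        is_error=is_error,
        is_origin_computation=is_origin,
        transaction_context=SimpleNamespace(gas_price=gas_price),
        state=SimpleNamespace(coinbase=b'\x00' * 20),
    )


child = _stub_computation(to=b'\x02' * 20)
root = _stub_computation(to=b'\x01' * 20, children=[child], is_origin=True)
touched = collect_touched_accounts(root)
# -> {b'\x01' * 20, b'\x02' * 20}: both call targets were touched.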
# This code starts recording audio, then registers for the
# 'SourceTrackDataMessage' event to get the sound localization data back from Misty

from mistyPy.Robot import Robot
from mistyPy.Events import Events
from mistyPy.EventFilters import EventFilters

import requests
import time


# The callback function must only accept one parameter, which will be the event message data
def registerAudioLocation(data):  # callback function 1
    print(data["message"])
    Msg = data["message"]
    element = list(Msg.items())[0]  # change 0 to the correct index to get the degree of arrival of speech
    print(element)


def registerAudioLocation_MetaData(data):  # callback function 2
    print("Printing meta data for audio localization......")
    print(data["message"])


if __name__ == "__main__":
    ipAddress = "IP ADDRESS"
    misty = Robot(ipAddress)

    # Start recording audio
    file_to_record = {"FileName": "deleteThis.wav"}
    url = "http://" + ipAddress + "/api/audio/record/start"
    r3_A = requests.post(url, json=file_to_record)  # let requests serialize the JSON body
    print(r3_A.status_code)

    # Register for events to get the direction of sound and other meta data.
    # Keep the registration alive for the whole recording window and
    # unregister only afterwards (or on error); otherwise no callbacks fire.
    try:
        misty.RegisterEvent("soundIn", Events.SourceTrackDataMessage,
                            callback_function=registerAudioLocation,
                            debounce=100, keep_alive=True)
        # misty.RegisterEvent("soundIn_meta", Events.SourceFocusConfigMessage,
        #                     callback_function=registerAudioLocation_MetaData,
        #                     keep_alive=True)
        # misty.KeepAlive()

        # wait for T seconds while events arrive
        T = 1
        time.sleep(T)
    except Exception as ex:
        print(ex)
    finally:
        misty.UnregisterAllEvents()  # unregister events even if an error occurred

    # Stop recording audio
    url = "http://" + ipAddress + "/api/audio/record/stop"
    r3_B = requests.post(url)
    print(r3_B.status_code)
""" No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501 The version of the OpenAPI document: 20220523 Generated by: https://openapi-generator.tech """ import re # noqa: F401 import sys # noqa: F401 from pokeapiclient.api_client import ApiClient, Endpoint as _Endpoint from pokeapiclient.model_utils import ( # noqa: F401 check_allowed_values, check_validations, date, datetime, file_type, none_type, validate_and_convert_types ) class MoveTargetApi(object): """NOTE: This class is auto generated by OpenAPI Generator Ref: https://openapi-generator.tech Do not edit the class manually. """ def __init__(self, api_client=None): if api_client is None: api_client = ApiClient() self.api_client = api_client self.move_target_list_endpoint = _Endpoint( settings={ 'response_type': (str,), 'auth': [], 'endpoint_path': '/api/v2/move-target/', 'operation_id': 'move_target_list', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'limit', 'offset', ], 'required': [], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'limit': (int,), 'offset': (int,), }, 'attribute_map': { 'limit': 'limit', 'offset': 'offset', }, 'location_map': { 'limit': 'query', 'offset': 'query', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'text/plain' ], 'content_type': [], }, api_client=api_client ) self.move_target_read_endpoint = _Endpoint( settings={ 'response_type': (str,), 'auth': [], 'endpoint_path': '/api/v2/move-target/{id}/', 'operation_id': 'move_target_read', 'http_method': 'GET', 'servers': None, }, params_map={ 'all': [ 'id', ], 'required': [ 'id', ], 'nullable': [ ], 'enum': [ ], 'validation': [ ] }, root_map={ 'validations': { }, 'allowed_values': { }, 'openapi_types': { 'id': (int,), }, 'attribute_map': { 'id': 'id', }, 'location_map': { 'id': 'path', }, 'collection_format_map': { } }, headers_map={ 'accept': [ 'text/plain' ], 'content_type': [], }, api_client=api_client ) def move_target_list( self, **kwargs ): """move_target_list # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.move_target_list(async_req=True) >>> result = thread.get() Keyword Args: limit (int): [optional] offset (int): [optional] _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. 
async_req (bool): execute request asynchronously Returns: str If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_spec_property_naming'] = kwargs.get( '_spec_property_naming', False ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') return self.move_target_list_endpoint.call_with_http_info(**kwargs) def move_target_read( self, id, **kwargs ): """move_target_read # noqa: E501 This method makes a synchronous HTTP request by default. To make an asynchronous HTTP request, please pass async_req=True >>> thread = api.move_target_read(id, async_req=True) >>> result = thread.get() Args: id (int): Keyword Args: _return_http_data_only (bool): response data without head status code and headers. Default is True. _preload_content (bool): if False, the urllib3.HTTPResponse object will be returned without reading/decoding response data. Default is True. _request_timeout (int/float/tuple): timeout setting for this request. If one number provided, it will be total request timeout. It can also be a pair (tuple) of (connection, read) timeouts. Default is None. _check_input_type (bool): specifies if type checking should be done one the data sent to the server. Default is True. _check_return_type (bool): specifies if type checking should be done one the data received from the server. Default is True. _spec_property_naming (bool): True if the variable names in the input data are serialized names, as specified in the OpenAPI document. False if the variable names in the input data are pythonic names, e.g. snake case (default) _content_type (str/None): force body content-type. Default is None and content-type will be predicted by allowed content-types and body. _host_index (int/None): specifies the index of the server that we want to use. Default is read from the configuration. async_req (bool): execute request asynchronously Returns: str If the method is called asynchronously, returns the request thread. """ kwargs['async_req'] = kwargs.get( 'async_req', False ) kwargs['_return_http_data_only'] = kwargs.get( '_return_http_data_only', True ) kwargs['_preload_content'] = kwargs.get( '_preload_content', True ) kwargs['_request_timeout'] = kwargs.get( '_request_timeout', None ) kwargs['_check_input_type'] = kwargs.get( '_check_input_type', True ) kwargs['_check_return_type'] = kwargs.get( '_check_return_type', True ) kwargs['_spec_property_naming'] = kwargs.get( '_spec_property_naming', False ) kwargs['_content_type'] = kwargs.get( '_content_type') kwargs['_host_index'] = kwargs.get('_host_index') kwargs['id'] = \ id return self.move_target_read_endpoint.call_with_http_info(**kwargs)
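# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the generated client): how the endpoint
# wrappers above are typically called. `MoveTargetApi()` falls back to a
# default `ApiClient()` per the constructor above; both calls hit the live
# PokeAPI at runtime, so network access is assumed.
# ---------------------------------------------------------------------------
api = MoveTargetApi()
page = api.move_target_list(limit=20, offset=0)  # paginated listing
one = api.move_target_read(1)                    # single resource by id
print(page, one)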
"""Conversions between various entites.""" from vortexasdk.conversions.corporations import convert_to_corporation_ids from vortexasdk.conversions.geographies import convert_to_geography_ids from vortexasdk.conversions.products import convert_to_product_ids from vortexasdk.conversions.vessels import convert_to_vessel_ids
""" Functions for manipulating wiki-text. Unless otherwise noted, all functions take a unicode string as the argument and return a unicode string. """ # # (C) Pywikibot team, 2008-2022 # # Distributed under the terms of the MIT license. # import datetime import re from collections import OrderedDict, namedtuple from collections.abc import Sequence from contextlib import suppress from html.parser import HTMLParser from typing import NamedTuple, Optional, Union import pywikibot from pywikibot.backports import Container, Dict, Iterable, List from pywikibot.backports import OrderedDict as OrderedDictType from pywikibot.backports import Sequence as SequenceType from pywikibot.backports import Tuple from pywikibot.exceptions import InvalidTitleError, SiteDefinitionError from pywikibot.family import Family from pywikibot.tools import deprecated from pywikibot.userinterfaces.transliteration import NON_LATIN_DIGITS try: import wikitextparser except ImportError: try: import mwparserfromhell as wikitextparser except ImportError: # print required because pywikibot is not imported completely raise ImportError(""" Pywikibot is missing a MediaWiki markup parser which is necessary. Please update the required module with either pip install "mwparserfromhell>=0.5.0" or pip install "wikitextparser>=0.47.5" """) from None ETPType = List[Tuple[str, OrderedDictType[str, str]]] # cache for replaceExcept to avoid recompile or regexes each call _regex_cache = {} # The regex below collects nested templates, providing simpler # identification of templates used at the top-level of wikitext. # It doesn't match {{{1|...}}}, however it also does not match templates # with a numerical name. e.g. {{1|..}}. It will correctly match {{{x}} as # being {{x}} with leading '{' left in the wikitext. # Prefix msg: is not included in the 'name' group, but all others are # included for backwards compatibility with TEMP_REGEX. # Only parser functions using # are excluded. # When more than two levels of templates are found, this regex will # capture from the beginning of the first {{ to the end of the last }}, # with wikitext between templates as part of the parameters of the first # template in the wikitext. # This ensures it fallsback to a safe mode for replaceExcept, as it # ensures that any replacement will not occur within template text. NESTED_TEMPLATE_REGEX = re.compile(r""" {{\s*(?:msg:\s*)? (?P<name>[^{\|#0-9][^{\|#]*?)\s* (?:\|(?P<params> [^{]*? (({{{[^{}]+?}}} |{{[^{}]+?}} |{[^{}]*?} ) [^{]*? )*? )? )? }} | (?P<unhandled_depth>{{\s*[^{\|#0-9][^{\|#]*?\s* [^{]* {{ .* }}) """, re.VERBOSE | re.DOTALL) # The following regex supports wikilinks anywhere after the first pipe # and correctly matches the end of the file link if the wikilink contains # [[ or ]]. # The namespace names must be substituted into this regex. # e.g. FILE_LINK_REGEX % 'File' or FILE_LINK_REGEX % '|'.join(site.namespaces) FILE_LINK_REGEX = r""" \[\[\s* (?:%s) # namespace aliases \s*: (?=(?P<filename> [^]|]* ))(?P=filename) ( \| ( ( (?=(?P<inner_link> \[\[.*?\]\] ))(?P=inner_link) )? (?=(?P<other_chars> [^\[\]]* ))(?P=other_chars) | (?=(?P<not_wikilink> \[[^]]*\] ))(?P=not_wikilink) )*? )?? \]\] """ # Used in TimeStripper. When a timestamp-like line has longer gaps # than this between year, month, etc in it, then the line will not be # considered to contain a timestamp. TIMESTAMP_GAP_LIMIT = 10 def to_local_digits(phrase: Union[str, int], lang: str) -> str: """ Change Latin digits based on language to localized version. 
Be aware that this function only works for several languages, and that it returns an unchanged string if an unsupported language is given. :param phrase: The phrase to convert to localized numerical :param lang: language code :return: The localized version """ digits = NON_LATIN_DIGITS.get(lang) if digits: phrase = str(phrase) trans = str.maketrans('0123456789', digits) phrase = phrase.translate(trans) return phrase def to_latin_digits(phrase: str, langs: Union[SequenceType[str], str, None] = None) -> str: """Change non-latin digits to latin digits. .. versionadded:: 7.0 :param phrase: The phrase to convert to latin numerical. :param langs: Language codes. If langs parameter is None, use all known languages to convert. :return: The string with latin digits """ if langs is None: langs = NON_LATIN_DIGITS.keys() elif isinstance(langs, str): langs = [langs] digits = [NON_LATIN_DIGITS[key] for key in langs if key in NON_LATIN_DIGITS] if digits: trans = str.maketrans(''.join(digits), '0123456789' * len(digits)) phrase = phrase.translate(trans) return phrase def case_escape(case: str, string: str) -> str: """Return an escaped regex pattern which depends on 'first-letter' case. .. versionadded:: 7.0 :param case: if `case` is 'first-letter' the regex contains an upper/lower case set for the first letter """ first = string[0] if first.isalpha() and case == 'first-letter': pattern = '[{}{}]{}'.format(first.upper(), first.lower(), re.escape(string[1:])) else: pattern = re.escape(string) return pattern class MultiTemplateMatchBuilder: """Build template matcher.""" def __init__(self, site) -> None: """Initializer.""" self.site = site def pattern(self, template, flags=re.DOTALL): """Return a compiled regex to match template.""" # TODO: add ability to also match contents within the template # TODO: add option for template to be None to match any template # TODO: merge regex with NESTED_TEMPLATE_REGEX namespace = self.site.namespaces[10] if isinstance(template, pywikibot.Page): if template.namespace() == 10: old = template.title(with_ns=False) else: raise ValueError( '{} is not a template Page object'.format(template)) elif isinstance(template, str): old = template else: raise ValueError( '{!r} is not a valid template'.format(template)) pattern = case_escape(namespace.case, old) # namespaces may be any mixed case namespaces = [ignore_case(ns) for ns in namespace] namespaces.append(ignore_case('msg')) pattern = re.sub(r'_|\\ ', r'[_ ]', pattern) templateRegexP = ( r'{{\s*(%(namespace)s:)?%(pattern)s' r'(?P<parameters>\s*\|[^{]+?' r'((({{{[^{}]+?}}}|{{[^{}]+?}}|{[^{}]*?})[^{]*?)*?)?' r'|)\s*}}' ) % {'namespace': ':|'.join(namespaces), 'pattern': pattern} templateRegex = re.compile(templateRegexP, flags) return templateRegex def search_any_predicate(self, templates): """Return a predicate that matches any template.""" predicates = [self.pattern(template).search for template in templates] return lambda text: any(predicate(text) for predicate in predicates) def ignore_case(string: str) -> str: """Return a case-insensitive pattern for the string. .. versionchanged:: 7.2 `_ignore_case` becomes a public method """ return ''.join( '[{}{}]'.format(c, s) if c != s else c for s, c in zip(string, string.swapcase())) def _tag_pattern(tag_name: str) -> str: """Return a tag pattern for the given tag name.""" return ( r'<{0}(?:>|\s+[^>]*(?<!/)>)' # start tag r'[\s\S]*?' 
# contents r'</{0}\s*>' # end tag .format(ignore_case(tag_name))) def _tag_regex(tag_name: str): """Return a compiled tag regex for the given tag name.""" return re.compile(_tag_pattern(tag_name)) def _create_default_regexes() -> None: """Fill (and possibly overwrite) _regex_cache with default regexes.""" _regex_cache.update({ # categories 'category': (r'\[\[ *(?:%s)\s*:.*?\]\]', lambda site: '|'.join(site.namespaces[14])), 'comment': re.compile(r'<!--[\s\S]*?-->'), # files 'file': (FILE_LINK_REGEX, lambda site: '|'.join(site.namespaces[6])), # section headers 'header': re.compile( r'(?:(?<=\n)|\A)(?:<!--[\s\S]*?-->)*' r'=(?:[^\n]|<!--[\s\S]*?-->)+=' r' *(?:<!--[\s\S]*?--> *)*(?=\n|\Z)'), # external links 'hyperlink': compileLinkR(), # also finds links to foreign sites with preleading ":" 'interwiki': ( r'\[\[:?(%s)\s?:[^\]]*\]\]\s*', lambda site: '|'.join( ignore_case(i) for i in site.validLanguageLinks() + list(site.family.obsolete.keys()))), # Module invocations (currently only Lua) 'invoke': ( r'\{\{\s*\#(?:%s):[\s\S]*?\}\}', lambda site: '|'.join( ignore_case(mw) for mw in site.getmagicwords('invoke'))), # this matches internal wikilinks, but also interwiki, categories, and # images. 'link': re.compile(r'\[\[[^\]|]*(\|[^\]]*)?\]\]'), # pagelist tag (used in Proofread extension). 'pagelist': re.compile(r'<{}[\s\S]*?/>' .format(ignore_case('pagelist'))), # Wikibase property inclusions 'property': ( r'\{\{\s*\#(?:%s):\s*[Pp]\d+.*?\}\}', lambda site: '|'.join( ignore_case(mw) for mw in site.getmagicwords('property'))), # lines that start with a colon or more will be indented 'startcolon': re.compile(r'(?:(?<=\n)|\A):(.*?)(?=\n|\Z)'), # lines that start with a space are shown in a monospace font and # have whitespace preserved. 'startspace': re.compile(r'(?:(?<=\n)|\A) (.*?)(?=\n|\Z)'), # tables often have whitespace that is used to improve wiki # source code readability. # TODO: handle nested tables. 'table': re.compile( r'(?:(?<=\n)|\A){\|[\S\s]*?\n\|}|%s' % _tag_pattern('table')), 'template': NESTED_TEMPLATE_REGEX, }) def _get_regexes(keys, site): """Fetch compiled regexes.""" if not _regex_cache: _create_default_regexes() result = [] for exc in keys: if not isinstance(exc, str): # assume it's a regular expression result.append(exc) continue # assume the string is a reference to a standard regex above, # which may not yet have a site specific re compiled. if exc in _regex_cache: if isinstance(_regex_cache[exc], tuple): if not site and exc in ('interwiki', 'property', 'invoke', 'category', 'file'): raise ValueError("Site cannot be None for the '{}' regex" .format(exc)) if (exc, site) not in _regex_cache: re_text, re_var = _regex_cache[exc] _regex_cache[(exc, site)] = re.compile( re_text % re_var(site), re.VERBOSE) result.append(_regex_cache[(exc, site)]) else: result.append(_regex_cache[exc]) else: # nowiki, noinclude, includeonly, timeline, math and other # extensions _regex_cache[exc] = _tag_regex(exc) result.append(_regex_cache[exc]) # handle aliases if exc == 'source': result.append(_tag_regex('syntaxhighlight')) elif exc == 'syntaxhighlight': result.append(_tag_regex('source')) elif exc == 'chem': result.append(_tag_regex('ce')) elif exc == 'math': result.append(_tag_regex('chem')) result.append(_tag_regex('ce')) return result def replaceExcept(text: str, old, new, exceptions: list, caseInsensitive: bool = False, allowoverlap: bool = False, marker: str = '', site=None, count: int = 0) -> str: """ Return text with 'old' replaced by 'new', ignoring specified types of text. 
Skips occurrences of 'old' within exceptions; e.g., within nowiki tags or HTML comments. If caseInsensitive is true, then use case insensitive regex matching. If allowoverlap is true, overlapping occurrences are all replaced (watch out when using this, it might lead to infinite loops!). :param text: text to be modified :param old: a compiled or uncompiled regular expression :param new: a unicode string (which can contain regular expression references), or a function which takes a match object as parameter. See parameter repl of re.sub(). :param exceptions: a list of strings or already compiled regex objects which signal what to leave out. Strings might be like ['math', 'table', 'template'] for example. :param marker: a string that will be added to the last replacement; if nothing is changed, it is added at the end :param count: how many replacements to do at most. See parameter count of re.sub(). """ # if we got a string, compile it as a regular expression if isinstance(old, str): old = re.compile(old, flags=re.IGNORECASE if caseInsensitive else 0) # early termination if not relevant if not old.search(text): return text + marker dontTouchRegexes = _get_regexes(exceptions, site) index = 0 replaced = 0 markerpos = len(text) while not count or replaced < count: if index > len(text): break match = old.search(text, index) if not match: # nothing left to replace break # check which exception will occur next. nextExceptionMatch = None for dontTouchR in dontTouchRegexes: excMatch = dontTouchR.search(text, index) if excMatch and ( nextExceptionMatch is None or excMatch.start() < nextExceptionMatch.start()): nextExceptionMatch = excMatch if nextExceptionMatch is not None \ and nextExceptionMatch.start() <= match.start(): # an HTML comment or text in nowiki tags stands before the next # valid match. Skip. index = nextExceptionMatch.end() else: # We found a valid match. Replace it. if callable(new): # the parameter new can be a function which takes the match # as a parameter. replacement = new(match) else: # it is not a function, but a string. # it is a little hack to make \n work. It would be better # to fix it previously, but better than nothing. new = new.replace('\\n', '\n') # We cannot just insert the new string, as it may contain regex # group references such as \2 or \g<name>. # On the other hand, this approach does not work because it # can't handle lookahead or lookbehind (see bug T123185). # So we have to process the group references manually. 
replacement = '' group_regex = re.compile(r'\\(\d+)|\\g<(.+?)>') last = 0 for group_match in group_regex.finditer(new): group_id = group_match.group(1) or group_match.group(2) with suppress(ValueError): group_id = int(group_id) try: replacement += new[last:group_match.start()] replacement += match.group(group_id) or '' except IndexError: raise IndexError('Invalid group reference: {}\n' 'Groups found: {}' .format(group_id, match.groups())) last = group_match.end() replacement += new[last:] text = text[:match.start()] + replacement + text[match.end():] # continue the search on the remaining text if allowoverlap: index = match.start() + 1 else: index = match.start() + len(replacement) if not match.group(): # When the regex allows to match nothing, shift by one char index += 1 markerpos = match.start() + len(replacement) replaced += 1 text = text[:markerpos] + marker + text[markerpos:] return text def removeDisabledParts(text: str, tags: Optional[Iterable] = None, include: Optional[Container] = None, site: Optional['pywikibot.site.BaseSite'] = None ) -> str: """ Return text without portions where wiki markup is disabled. Parts that will be removed by default are: * HTML comments * nowiki tags * pre tags * includeonly tags * source and syntaxhighlight tags .. versionchanged:: 7.0 the order of removals will correspond to the tags argument if provided as an ordered collection (list, tuple) :param tags: The exact set of parts which should be removed using keywords from textlib._get_regexes(). :param include: Or, in alternative, default parts that shall not be removed. :param site: Site to be used for site-dependent regexes. Default disabled parts listed above do not need it. :return: text stripped from disabled parts. """ if not tags: tags = ['comment', 'includeonly', 'nowiki', 'pre', 'syntaxhighlight'] # avoid set(tags) because sets are internally ordered using the hash # which for strings is salted per Python process => the output of # this function would likely be different per script run because # the replacements would be done in different order and the disabled # parts may overlap and suppress each other # see https://docs.python.org/3/reference/datamodel.html#object.__hash__ # ("Note" at the end of the section) if include: tags = [tag for tag in tags if tag not in include] regexes = _get_regexes(tags, site) for regex in regexes: text = regex.sub('', text) return text def removeHTMLParts(text: str, keeptags: Optional[List[str]] = None) -> str: """ Return text without portions where HTML markup is disabled. Parts that can/will be removed are -- * HTML and all wiki tags The exact set of parts which should NOT be removed can be passed as the 'keeptags' parameter, which defaults to ['tt', 'nowiki', 'small', 'sup']. """ # try to merge with 'removeDisabledParts()' above into one generic function # thanks to: # https://www.hellboundhackers.org/articles/read-article.php?article_id=841 parser = _GetDataHTML() if keeptags is None: keeptags = ['tt', 'nowiki', 'small', 'sup'] with parser: parser.keeptags = keeptags parser.feed(text) return parser.textdata class _GetDataHTML(HTMLParser): """HTML parser which removes html tags except they are listed in keeptags. This class is also a context manager which closes itself at exit time. .. 
seealso:: :pylib:`html.parser` """ textdata = '' keeptags = [] def __enter__(self) -> None: pass def __exit__(self, *exc_info) -> None: self.close() def handle_data(self, data) -> None: """Add data to text.""" self.textdata += data def handle_starttag(self, tag, attrs) -> None: """Add start tag to text if tag should be kept.""" if tag in self.keeptags: self.textdata += '<{}>'.format(tag) def handle_endtag(self, tag) -> None: """Add end tag to text if tag should be kept.""" if tag in self.keeptags: self.textdata += '</{}>'.format(tag) def isDisabled(text: str, index: int, tags=None) -> bool: """ Return True if text[index] is disabled, e.g. by a comment or nowiki tags. For the tags parameter, see :py:obj:`removeDisabledParts`. """ # Find a marker that is not already in the text. marker = findmarker(text) text = text[:index] + marker + text[index:] text = removeDisabledParts(text, tags) return marker not in text def findmarker(text: str, startwith: str = '@@', append: Optional[str] = None) -> str: """Find a string which is not part of text.""" if not append: append = '@' mymarker = startwith while mymarker in text: mymarker += append return mymarker def expandmarker(text: str, marker: str = '', separator: str = '') -> str: """ Return a marker expanded whitespace and the separator. It searches for the first occurrence of the marker and gets the combination of the separator and whitespace directly before it. :param text: the text which will be searched. :param marker: the marker to be searched. :param separator: the separator string allowed before the marker. If empty it won't include whitespace too. :return: the marker with the separator and whitespace from the text in front of it. It'll be just the marker if the separator is empty. """ # set to remove any number of separator occurrences plus arbitrary # whitespace before, after, and between them, # by allowing to include them into marker. if separator: firstinmarker = text.find(marker) firstinseparator = firstinmarker lenseparator = len(separator) striploopcontinue = True while firstinseparator > 0 and striploopcontinue: striploopcontinue = False if (firstinseparator >= lenseparator and separator == text[firstinseparator - lenseparator:firstinseparator]): firstinseparator -= lenseparator striploopcontinue = True elif text[firstinseparator - 1] < ' ': firstinseparator -= 1 striploopcontinue = True marker = text[firstinseparator:firstinmarker] + marker return marker def replace_links(text: str, replace, site: 'pywikibot.site.BaseSite') -> str: """Replace wikilinks selectively. The text is searched for a link and on each link it replaces the text depending on the result for that link. If the result is just None it skips that link. When it's False it unlinks it and just inserts the label. When it is a Link instance it'll use the target, section and label from that Link instance. If it's a Page instance it'll use just the target from the replacement and the section and label from the original link. If it's a string and the replacement was a sequence it converts it into a Page instance. If the replacement is done via a callable it'll use it like unlinking and directly replace the link with the text itself. It only supports unicode when used by the callable and bytes are not allowed. If either the section or label should be used the replacement can be a function which returns a Link instance and copies the value which should remaining. .. 
versionchanged:: 7.0 `site` parameter is mandatory :param text: the text in which to replace links :param replace: either a callable which reacts like described above. The callable must accept four parameters link, text, groups, rng and allows for user interaction. The groups are a dict containing 'title', 'section', 'label' and 'linktrail' and the rng are the start and end position of the link. The 'label' in groups contains everything after the first pipe which might contain additional data which is used in File namespace for example. Alternatively it can be a sequence containing two items where the first must be a Link or Page and the second has almost the same meaning as the result by the callable. It'll convert that into a callable where the first item (the Link or Page) has to be equal to the found link and in that case it will apply the second value from the sequence. :type replace: sequence of pywikibot.Page/pywikibot.Link/str or callable :param site: a Site object to use. It should match the origin or target site of the text :raises TypeError: missing positional argument 'site' :raises ValueError: Wrong site type :raises ValueError: Wrong replacement number :raises ValueError: Wrong replacement types """ def to_link(source): """Return the link from source when it's a Page otherwise itself.""" if isinstance(source, pywikibot.Page): return source._link if isinstance(source, str): return pywikibot.Link(source, site) return source def replace_callable(link, text, groups, rng): if replace_list[0] == link: return replace_list[1] return None def check_classes(replacement): """Normalize the replacement into a list.""" if not isinstance(replacement, (pywikibot.Page, pywikibot.Link)): raise ValueError('The replacement must be None, False, ' 'a sequence, a Link or a str but ' 'is "{}"'.format(type(replacement))) def title_section(link) -> str: title = link.title if link.section: title += '#' + link.section return title if not isinstance(site, pywikibot.site.BaseSite): raise ValueError('The "site" argument must be a BaseSite not {}.' 
.format(type(site).__name__)) if isinstance(replace, Sequence): if len(replace) != 2: raise ValueError('When used as a sequence, the "replace" ' 'argument must contain exactly 2 items.') replace_list = [to_link(replace[0]), replace[1]] if not isinstance(replace_list[0], pywikibot.Link): raise ValueError( 'The original value must be either str, Link or Page ' 'but is "{}"'.format(type(replace_list[0]))) if replace_list[1] is not False and replace_list[1] is not None: if isinstance(replace_list[1], str): replace_list[1] = pywikibot.Page(site, replace_list[1]) check_classes(replace_list[0]) replace = replace_callable linktrail = site.linktrail() link_pattern = re.compile( r'\[\[(?P<title>.*?)(#(?P<section>.*?))?(\|(?P<label>.*?))?\]\]' r'(?P<linktrail>{})'.format(linktrail)) extended_label_pattern = re.compile(r'(.*?\]\])({})'.format(linktrail)) linktrail = re.compile(linktrail) curpos = 0 # This loop will run until we have finished the current page while True: m = link_pattern.search(text, pos=curpos) if not m: break # Ignore links to sections of the same page if not m.group('title').strip(): curpos = m.end() continue # Ignore interwiki links if (site.isInterwikiLink(m.group('title').strip()) and not m.group('title').strip().startswith(':')): curpos = m.end() continue groups = m.groupdict() if groups['label'] and '[[' in groups['label']: # TODO: Work on the link within the label too # A link within a link, extend the label to the ]] after it extended_match = extended_label_pattern.search(text, pos=m.end()) if not extended_match: # TODO: Unclosed link label, what happens there? curpos = m.end() continue groups['label'] += groups['linktrail'] + extended_match.group(1) groups['linktrail'] = extended_match.group(2) end = extended_match.end() else: end = m.end() start = m.start() # Since this point the m variable shouldn't be used as it may not # contain all contents del m try: link = pywikibot.Link.create_separated( groups['title'], site, section=groups['section'], label=groups['label']) except (SiteDefinitionError, InvalidTitleError): # unrecognized iw prefix or invalid title curpos = end continue # Check whether the link found should be replaced. # Either None, False or tuple(Link, bool) new_link = replace(link, text, groups.copy(), (start, end)) if new_link is None: curpos = end continue # The link looks like this: # [[page_title|new_label]]new_linktrail page_title = groups['title'] new_label = groups['label'] if not new_label: # or like this: [[page_title]]new_linktrail new_label = page_title # remove preleading ":" from the link text if new_label[0] == ':': new_label = new_label[1:] new_linktrail = groups['linktrail'] if new_linktrail: new_label += new_linktrail if new_link is False: # unlink - we remove the section if there's any assert isinstance(new_label, str), 'link text must be str.' new_link = new_label if isinstance(new_link, str): # Nothing good can come out of the fact that bytes is returned so # force unicode text = text[:start] + new_link + text[end:] # Make sure that next time around we will not find this same hit. 
curpos = start + len(new_link) continue if isinstance(new_link, bytes): raise ValueError('The result must be str and not bytes.') # Verify that it's either Link, Page or str check_classes(new_link) # Use section and label if it's a Link and not otherwise if isinstance(new_link, pywikibot.Link): is_link = True else: new_link = new_link._link is_link = False new_title = new_link.canonical_title() # Make correct langlink if needed if not new_link.site == site: new_title = ':' + new_link.site.code + ':' + new_title if is_link: # Use link's label new_label = new_link.anchor must_piped = new_label is not None new_section = new_link.section else: must_piped = True new_section = groups['section'] if new_section: new_title += '#' + new_section if new_label is None: new_label = new_title # Parse the link text and check if it points to the same page parsed_new_label = pywikibot.Link(new_label, new_link.site) try: parsed_new_label.parse() except InvalidTitleError: pass else: parsed_link_title = title_section(parsed_new_label) new_link_title = title_section(new_link) # compare title, but only with parts if linktrail works if not linktrail.sub('', parsed_link_title[len(new_link_title):]): # TODO: This must also compare everything that was used as a # prefix (in case insensitive) must_piped = ( not parsed_link_title.startswith(new_link_title) or parsed_new_label.namespace != new_link.namespace) if must_piped: new_text = '[[{}|{}]]'.format(new_title, new_label) else: new_text = '[[{}]]{}'.format(new_label[:len(new_title)], new_label[len(new_title):]) text = text[:start] + new_text + text[end:] # Make sure that next time around we will not find this same hit. curpos = start + len(new_text) return text def add_text(text: str, add: str, *, site=None) -> str: """Add text to a page content above categories and interwiki. .. versionadded:: 6.4 :param text: The page content to add text to. :param add: Text to add. :param site: The site that the text is coming from. Required for reorder of categories and interlanguage links. Te default site is used otherwise. :type site: pywikibot.Site """ # Translating the \\n (e.g. 
from command line) into binary \n add = add.replace('\\n', '\n') # Getting the categories categories_inside = getCategoryLinks(text, site) # Deleting the categories text = removeCategoryLinks(text, site) # Getting the interwiki interwiki_inside = getLanguageLinks(text, site) # Removing the interwiki text = removeLanguageLinks(text, site) # Adding the text text += '\n' + add # Reputting the categories text = replaceCategoryLinks(text, categories_inside, site, addOnly=True) # Adding the interwiki return replaceLanguageLinks(text, interwiki_inside, site) # ------------------------------- # Functions dealing with sections # ------------------------------- _Heading = namedtuple('_Heading', ('text', 'start', 'end')) _Section = namedtuple('_Section', ('title', 'content')) _Content = namedtuple('_Content', ('header', 'sections', 'footer')) def _extract_headings(text: str, site) -> list: """Return _Heading objects.""" headings = [] heading_regex = _get_regexes(['header'], site)[0] for match in heading_regex.finditer(text): start, end = match.span() if not isDisabled(text, start) and not isDisabled(text, end): headings.append(_Heading(match.group(), start, end)) return headings def _extract_sections(text: str, headings) -> list: """Return _Section objects.""" if headings: # Assign them their contents contents = [] for i, heading in enumerate(headings): try: next_heading = headings[i + 1] except IndexError: contents.append(text[heading.end:]) else: contents.append(text[heading.end:next_heading.start]) return [_Section(heading.text, content) for heading, content in zip(headings, contents)] return [] def extract_sections( text: str, site=None ) -> NamedTuple('_Content', [('header', str), # noqa: F821 ('body', List[Tuple[str, str]]), # noqa: F821 ('footer', str)]): # noqa: F821 """ Return section headings and contents found in text. :return: The returned namedtuple contains the text parsed into header, contents and footer parts: The header part is a string containing text part above the first heading. The footer part is also a string containing text part after the last section. The section part is a list of tuples, each tuple containing a string with section heading and a string with section content. Example article:: '''A''' is a thing. == History of A == Some history... == Usage of A == Some usage... [[Category:Things starting with A]] ...is parsed into the following namedtuple:: result = extract_sections(text, site) result.header = "'''A''' is a thing." result.body = [('== History of A ==', 'Some history...'), ('== Usage of A ==', 'Some usage...')] result.footer = '[[Category:Things starting with A]]' .. 
versionadded:: 3.0 """ headings = _extract_headings(text, site) sections = _extract_sections(text, headings) # Find header and footer contents header = text[:headings[0].start] if headings else text cat_regex, interwiki_regex = _get_regexes(('category', 'interwiki'), site) langlink_pattern = interwiki_regex.pattern.replace(':?', '') last_section_content = sections[-1].content if sections else header footer = re.search( r'({})*\Z'.format(r'|'.join((langlink_pattern, cat_regex.pattern, r'\s'))), last_section_content).group().lstrip() if footer: if sections: sections[-1] = _Section( sections[-1].title, last_section_content[:-len(footer)]) else: header = header[:-len(footer)] return _Content(header, sections, footer) # ----------------------------------------------- # Functions dealing with interwiki language links # ----------------------------------------------- # Note - MediaWiki supports several kinds of interwiki links; two kinds are # inter-language links. We deal here with those kinds only. # A family has by definition only one kind of inter-language links: # 1 - inter-language links inside the own family. # They go to a corresponding page in another language in the same # family, such as from 'en.wikipedia' to 'pt.wikipedia', or from # 'es.wiktionary' to 'ar.wiktionary'. # Families with this kind have several language-specific sites. # They have their interwiki_forward attribute set to None # 2 - language links forwarding to another family. # They go to a corresponding page in another family, such as from # 'commons' to 'zh.wikipedia, or from 'incubator' to 'en.wikipedia'. # Families having those have one member only, and do not have # language-specific sites. The name of the target family of their # inter-language links is kept in their interwiki_forward attribute. # These functions only deal with links of these two kinds only. They # do not find or change links of other kinds, nor any that are formatted # as in-line interwiki links (e.g., "[[:es:Artículo]]". def getLanguageLinks( text: str, insite=None, template_subpage: bool = False ) -> Dict: """ Return a dict of inter-language links found in text. The returned dict uses the site as keys and Page objects as values. It does not contain its own site. Do not call this routine directly, use Page.interwiki() method instead. """ if insite is None: insite = pywikibot.Site() fam = insite.family # when interwiki links forward to another family, retrieve pages & other # infos there if fam.interwiki_forward: fam = Family.load(fam.interwiki_forward) result = {} # Ignore interwiki links within nowiki tags, includeonly tags, pre tags, # and HTML comments include = [] if template_subpage: include = ['includeonly'] text = removeDisabledParts(text, include=include) # This regular expression will find every link that is possibly an # interwiki link. # NOTE: language codes are case-insensitive and only consist of basic latin # letters and hyphens. # TODO: currently, we do not have any, but BCP 47 allows digits, and # underscores. # TODO: There is no semantic difference between hyphens and # underscores -> fold them. interwikiR = re.compile(r'\[\[([a-zA-Z\-]+)\s?:([^\[\]\n]*)\]\]') for lang, pagetitle in interwikiR.findall(text): lang = lang.lower() # Check if it really is in fact an interwiki link to a known # language, or if it's e.g. 
a category tag or an internal link if lang in fam.obsolete: lang = fam.obsolete[lang] if lang in fam.langs: if '|' in pagetitle: # ignore text after the pipe pagetitle = pagetitle[:pagetitle.index('|')] # we want the actual page objects rather than the titles site = pywikibot.Site(code=lang, fam=fam) # skip language links to its own site if site == insite: continue previous_key_count = len(result) page = pywikibot.Page(site, pagetitle) try: result[page.site] = page # need to trigger page._link.parse() except InvalidTitleError: pywikibot.output('[getLanguageLinks] Text contains invalid ' 'interwiki link [[{}:{}]].' .format(lang, pagetitle)) continue if previous_key_count == len(result): pywikibot.warning('[getLanguageLinks] 2 or more interwiki ' 'links point to site {}.'.format(site)) return result def removeLanguageLinks(text: str, site=None, marker: str = '') -> str: """Return text with all inter-language links removed. If a link to an unknown language is encountered, a warning is printed. :param text: The text that needs to be modified. :param site: The site that the text is coming from. :type site: pywikibot.Site :param marker: If defined, marker is placed after the last language link, or at the end of text if there are no language links. :return: The modified text. """ if site is None: site = pywikibot.Site() # This regular expression will find every interwiki link, plus trailing # whitespace. languages = '|'.join(site.validLanguageLinks() + list(site.family.obsolete.keys())) if not languages: return text interwikiR = re.compile(r'\[\[({})\s?:[^\[\]\n]*\]\][\s]*' .format(languages), re.IGNORECASE) text = replaceExcept(text, interwikiR, '', ['comment', 'math', 'nowiki', 'pre', 'syntaxhighlight'], marker=marker, site=site) return text.strip() def removeLanguageLinksAndSeparator(text: str, site=None, marker: str = '', separator: str = '') -> str: """ Return text with inter-language links and preceding separators removed. If a link to an unknown language is encountered, a warning is printed. :param text: The text that needs to be modified. :param site: The site that the text is coming from. :type site: pywikibot.Site :param marker: If defined, marker is placed after the last language link, or at the end of text if there are no language links. :param separator: The separator string that will be removed if followed by the language links. :return: The modified text """ if separator: mymarker = findmarker(text, '@L@') newtext = removeLanguageLinks(text, site, mymarker) mymarker = expandmarker(newtext, mymarker, separator) return newtext.replace(mymarker, marker) return removeLanguageLinks(text, site, marker) def replaceLanguageLinks(oldtext: str, new: dict, site=None, addOnly: bool = False, template: bool = False, template_subpage: bool = False) -> str: """Replace inter-language links in the text with a new set of links. :param oldtext: The text that needs to be modified. :param new: A dict with the Site objects as keys, and Page or Link objects as values (i.e., just like the dict returned by getLanguageLinks function). :param site: The site that the text is from. :type site: pywikibot.Site :param addOnly: If True, do not remove old language links, only add new ones. :param template: Indicates if text belongs to a template page or not. :param template_subpage: Indicates if text belongs to a template sub-page or not. :return: The modified text. """ # Find a marker that is not already in the text. 
marker = findmarker(oldtext) if site is None: site = pywikibot.Site() separator = site.family.interwiki_text_separator cseparator = site.family.category_text_separator separatorstripped = separator.strip() cseparatorstripped = cseparator.strip() if addOnly: s2 = oldtext else: s2 = removeLanguageLinksAndSeparator(oldtext, site=site, marker=marker, separator=separatorstripped) s = interwikiFormat(new, insite=site) if not s: newtext = s2.replace(marker, '') elif site.code in site.family.interwiki_attop \ or '<!-- interwiki at top -->' in oldtext: # do not add separator if interwiki links are on one line newtext = s + ('' if site.code in site.family.interwiki_on_one_line else separator) + s2.replace(marker, '').strip() else: # calculate what was after the language links on the page firstafter = s2.find(marker) if firstafter < 0: firstafter = len(s2) else: firstafter += len(marker) # Any text in 'after' part that means we should keep it after? if '</noinclude>' in s2[firstafter:]: if separatorstripped: s = separator + s newtext = (s2[:firstafter].replace(marker, '') + s + s2[firstafter:]) elif site.code in site.family.categories_last: cats = getCategoryLinks(s2, site=site) s2 = removeCategoryLinksAndSeparator( s2.replace(marker, cseparatorstripped).strip(), site) \ + separator + s newtext = replaceCategoryLinks(s2, cats, site=site, addOnly=True) # for Wikitravel's language links position. # (not supported by rewrite - no API) elif site.family.name == 'wikitravel': s = separator + s + separator newtext = (s2[:firstafter].replace(marker, '') + s + s2[firstafter:]) elif template or template_subpage: if template_subpage: includeOn = '<includeonly>' includeOff = '</includeonly>' else: includeOn = '<noinclude>' includeOff = '</noinclude>' separator = '' # Do we have a noinclude at the end of the template? parts = s2.split(includeOff) lastpart = parts[-1] if re.match(r'\s*{}'.format(marker), lastpart): # Put the langlinks back into the noinclude's regexp = re.compile(r'{}\s*{}'.format(includeOff, marker)) newtext = regexp.sub(s + includeOff, s2) else: # Put the langlinks at the end, inside noinclude's newtext = (s2.replace(marker, '').strip() + separator + '{}\n{}{}\n'.format(includeOn, s, includeOff)) else: newtext = s2.replace(marker, '').strip() + separator + s # special parts above interwiki above_interwiki = [] if site.sitename == 'wikipedia:nn': comment = re.compile( r'<!--interwiki \(no(\/nb)?, *sv, *da first; then other languages ' r'alphabetically by name\)-->') above_interwiki.append(comment) if site.family.name == 'wikipedia' and site.code in ('ba', 'crh', 'krc'): comment = re.compile(r'<!-- [Ii]nterwikis -->') above_interwiki.append(comment) if above_interwiki: interwiki = _get_regexes(['interwiki'], site)[0] first_interwiki = interwiki.search(newtext) for reg in above_interwiki: special = reg.search(newtext) if special and not isDisabled(newtext, special.start()): newtext = (newtext[:special.start()].strip() + newtext[special.end():]) newtext = (newtext[:first_interwiki.start()].strip() + special.group() + '\n' + newtext[first_interwiki.start():]) return newtext.strip() def interwikiFormat(links: dict, insite=None) -> str: """Convert interwiki link dict into a wikitext string. :param links: interwiki links to be formatted :type links: dict with the Site objects as keys, and Page or Link objects as values. # noqa: DAR103 :param insite: site the interwiki links will be formatted for (defaulting to the current site). 
:type insite: BaseSite :return: string including wiki links formatted for inclusion in insite """ if not links: return '' if insite is None: insite = pywikibot.Site() ar = interwikiSort(list(links.keys()), insite) s = [] for site in ar: if isinstance(links[site], pywikibot.Link): links[site] = pywikibot.Page(links[site]) if isinstance(links[site], pywikibot.Page): title = links[site].title(as_link=True, force_interwiki=True, insite=insite) link = title.replace('[[:', '[[') s.append(link) else: raise ValueError('links dict must contain Page or Link objects') if insite.code in insite.family.interwiki_on_one_line: sep = ' ' else: sep = '\n' return sep.join(s) + '\n' def interwikiSort(sites, insite=None): """Sort sites according to local interwiki sort logic.""" if not sites: return [] if insite is None: insite = pywikibot.Site() sites.sort() putfirst = insite.interwiki_putfirst() if putfirst: # In this case I might have to change the order firstsites = [] validlanglinks = insite.validLanguageLinks() for code in putfirst: if code in validlanglinks: site = insite.getSite(code=code) if site in sites: del sites[sites.index(site)] firstsites = firstsites + [site] sites = firstsites + sites return sites # ------------------------------------- # Functions dealing with category links # ------------------------------------- def getCategoryLinks(text: str, site=None, include: Optional[List[str]] = None, expand_text: bool = False) -> List['pywikibot.Category']: """Return a list of category links found in text. :param include: list of tags which should not be removed by removeDisabledParts() and where CategoryLinks can be searched. :return: all category links found """ result = [] if site is None: site = pywikibot.Site() # Ignore category links within nowiki tags, pre tags, includeonly tags, # and HTML comments text = removeDisabledParts(text, include=include or []) catNamespace = '|'.join(site.namespaces.CATEGORY) R = re.compile(r'\[\[\s*(?P<namespace>{})\s*:\s*(?P<rest>.+?)\]\]' .format(catNamespace), re.I) for match in R.finditer(text): if expand_text and '{{' in match.group('rest'): rest = site.expand_text(match.group('rest')) else: rest = match.group('rest') if '|' in rest: title, sortKey = rest.split('|', 1) else: title, sortKey = rest, None try: cat = pywikibot.Category(pywikibot.Link( '%s:%s' % (match.group('namespace'), title), site), sort_key=sortKey) except InvalidTitleError: # Category title extracted contains invalid characters # Likely due to on-the-fly category name creation, see T154309 pywikibot.warning('Invalid category title extracted: {}' .format(title)) else: result.append(cat) return result def removeCategoryLinks(text: str, site=None, marker: str = '') -> str: """Return text with all category links removed. :param text: The text that needs to be modified. :param site: The site that the text is coming from. :type site: pywikibot.Site :param marker: If defined, marker is placed after the last category link, or at the end of text if there are no category links. :return: The modified text. """ # This regular expression will find every link that is possibly an # interwiki link, plus trailing whitespace. The language code is grouped. # NOTE: This assumes that language codes only consist of non-capital # ASCII letters and hyphens. 
if site is None: site = pywikibot.Site() catNamespace = '|'.join(site.namespaces.CATEGORY) categoryR = re.compile(r'\[\[\s*({})\s*:.*?\]\]\s*' .format(catNamespace), re.I) text = replaceExcept(text, categoryR, '', ['comment', 'includeonly', 'math', 'nowiki', 'pre', 'syntaxhighlight'], marker=marker, site=site) if marker: # avoid having multiple linefeeds at the end of the text text = re.sub(r'\s*{}'.format(re.escape(marker)), '\n' + marker, text.strip()) return text.strip() def removeCategoryLinksAndSeparator(text: str, site=None, marker: str = '', separator: str = '') -> str: """ Return text with category links and preceding separators removed. :param text: The text that needs to be modified. :param site: The site that the text is coming from. :type site: pywikibot.Site :param marker: If defined, marker is placed after the last category link, or at the end of text if there are no category links. :param separator: The separator string that will be removed if followed by the category links. :return: The modified text """ if site is None: site = pywikibot.Site() if separator: mymarker = findmarker(text, '@C@') newtext = removeCategoryLinks(text, site, mymarker) mymarker = expandmarker(newtext, mymarker, separator) return newtext.replace(mymarker, marker) return removeCategoryLinks(text, site, marker) def replaceCategoryInPlace(oldtext, oldcat, newcat, site=None, add_only: bool = False) -> str: """ Replace old category with new one and return the modified text. :param oldtext: Content of the old category :param oldcat: pywikibot.Category object of the old category :param newcat: pywikibot.Category object of the new category :param add_only: If add_only is True, the old category won't be replaced and the category given will be added after it. :return: the modified text """ if site is None: site = pywikibot.Site() catNamespace = '|'.join(site.namespaces.CATEGORY) title = oldcat.title(with_ns=False) if not title: return oldtext # title might contain regex special characters title = case_escape(site.namespaces[14].case, title) # spaces and underscores in page titles are interchangeable and collapsible title = title.replace(r'\ ', '[ _]+').replace(r'\_', '[ _]+') categoryR = re.compile(r'\[\[\s*({})\s*:\s*{}[\s\u200e\u200f]*' r'((?:\|[^]]+)?\]\])' .format(catNamespace, title), re.I) categoryRN = re.compile( r'^[^\S\n]*\[\[\s*({})\s*:\s*{}[\s\u200e\u200f]*' r'((?:\|[^]]+)?\]\])[^\S\n]*\n' .format(catNamespace, title), re.I | re.M) exceptions = ['comment', 'math', 'nowiki', 'pre', 'syntaxhighlight'] if newcat is None: # First go through and try the more restrictive regex that removes # an entire line, if the category is the only thing on that line (this # prevents blank lines left over in category lists following a removal) text = replaceExcept(oldtext, categoryRN, '', exceptions, site=site) text = replaceExcept(text, categoryR, '', exceptions, site=site) elif add_only: text = replaceExcept( oldtext, categoryR, '{}\n{}'.format( oldcat.title(as_link=True, allow_interwiki=False), newcat.title(as_link=True, allow_interwiki=False)), exceptions, site=site) else: text = replaceExcept(oldtext, categoryR, '[[{}:{}\\2' .format(site.namespace(14), newcat.title(with_ns=False)), exceptions, site=site) return text def replaceCategoryLinks(oldtext: str, new, site=None, addOnly: bool = False) -> str: """ Replace all existing category links with new category links. :param oldtext: The text that needs to be replaced. 
:param new: Should be a list of Category objects or strings which can be either the raw name or [[Category:..]]. :type new: iterable :param site: The site that the text is from. :type site: pywikibot.Site :param addOnly: If addOnly is True, the old category won't be deleted and the category(s) given will be added (and they won't replace anything). :return: The modified text. """ # Find a marker that is not already in the text. marker = findmarker(oldtext) if site is None: site = pywikibot.Site() if re.search(r'\{\{ *(' + r'|'.join(site.getmagicwords('defaultsort')) + r')', oldtext, flags=re.I): separator = '\n' else: separator = site.family.category_text_separator iseparator = site.family.interwiki_text_separator separatorstripped = separator.strip() iseparatorstripped = iseparator.strip() if addOnly: cats_removed_text = oldtext else: cats_removed_text = removeCategoryLinksAndSeparator( oldtext, site=site, marker=marker, separator=separatorstripped) new_cats = categoryFormat(new, insite=site) if new_cats: if site.code in site.family.category_attop: newtext = new_cats + separator + cats_removed_text else: # calculate what was after the categories links on the page firstafter = cats_removed_text.find(marker) if firstafter < 0: firstafter = len(cats_removed_text) else: firstafter += len(marker) # Is there text in the 'after' part that means we should keep it # after? if '</noinclude>' in cats_removed_text[firstafter:]: if separatorstripped: new_cats = separator + new_cats newtext = (cats_removed_text[:firstafter].replace(marker, '') + new_cats + cats_removed_text[firstafter:]) elif site.code in site.family.categories_last: newtext = (cats_removed_text.replace(marker, '').strip() + separator + new_cats) else: interwiki = getLanguageLinks(cats_removed_text, insite=site) langs_removed_text = removeLanguageLinksAndSeparator( cats_removed_text.replace(marker, ''), site, '', iseparatorstripped) + separator + new_cats newtext = replaceLanguageLinks( langs_removed_text, interwiki, site, addOnly=True) else: newtext = cats_removed_text.replace(marker, '') # special parts under categories under_categories = [] if site.sitename == 'wikipedia:de': personendaten = re.compile(r'\{\{ *Personendaten.*?\}\}', re.I | re.DOTALL) under_categories.append(personendaten) if site.sitename == 'wikipedia:yi': stub = re.compile(r'\{\{.*?שטומף *\}\}', re.I) under_categories.append(stub) if site.family.name == 'wikipedia' and site.code in ('simple', 'en'): stub = re.compile(r'\{\{.*?stub *\}\}', re.I) under_categories.append(stub) if under_categories: category = _get_regexes(['category'], site)[0] for last_category in category.finditer(newtext): pass for reg in under_categories: special = reg.search(newtext) if special and not isDisabled(newtext, special.start()): newtext = (newtext[:special.start()].strip() + newtext[special.end():]) newtext = (newtext[:last_category.end()].strip() + '\n' * 2 + special.group() + newtext[last_category.end():]) return newtext.strip() def categoryFormat(categories, insite=None) -> str: """Return a string containing links to all categories in a list. :param categories: A list of Category or Page objects or strings which can be either the raw name, [[Category:..]] or [[cat_localised_ns:...]]. :type categories: iterable :param insite: Used to to localise the category namespace. 
:type insite: pywikibot.Site :return: String of categories """ if not categories: return '' if insite is None: insite = pywikibot.Site() catLinks = [] for category in categories: if isinstance(category, str): category, separator, sortKey = category.strip('[]').partition('|') sortKey = sortKey if separator else None # whole word if no ":" is present prefix = category.split(':', 1)[0] if prefix not in insite.namespaces[14]: category = '{}:{}'.format(insite.namespace(14), category) category = pywikibot.Category(pywikibot.Link(category, insite, default_namespace=14), sort_key=sortKey) # Make sure a category is casted from Page to Category. elif not isinstance(category, pywikibot.Category): category = pywikibot.Category(category) link = category.aslink() catLinks.append(link) sep = ' ' if insite.category_on_one_line() else '\n' # Some people don't like the categories sorted # catLinks.sort() return sep.join(catLinks) + '\n' # ------------------------------------- # Functions dealing with external links # ------------------------------------- def compileLinkR(withoutBracketed: bool = False, onlyBracketed: bool = False): """Return a regex that matches external links.""" # RFC 2396 says that URLs may only contain certain characters. # For this regex we also accept non-allowed characters, so that the bot # will later show these links as broken ('Non-ASCII Characters in URL'). # Note: While allowing dots inside URLs, MediaWiki will regard # dots at the end of the URL as not part of that URL. # The same applies to comma, colon and some other characters. notAtEnd = r'\]\s\.:;,<>"\|\)' # So characters inside the URL can be anything except whitespace, # closing squared brackets, quotation marks, greater than and less # than, and the last character also can't be parenthesis or another # character disallowed by MediaWiki. notInside = r'\]\s<>"' # The first half of this regular expression is required because '' is # not allowed inside links. For example, in this wiki text: # ''Please see https://www.example.org.'' # .'' shouldn't be considered as part of the link. regex = r'(?P<url>http[s]?://[^{notInside}]*?[^{notAtEnd}]' \ r'(?=[{notAtEnd}]*\'\')|http[s]?://[^{notInside}]*' \ r'[^{notAtEnd}])'.format(notInside=notInside, notAtEnd=notAtEnd) if withoutBracketed: regex = r'(?<!\[)' + regex elif onlyBracketed: regex = r'\[' + regex linkR = re.compile(regex) return linkR # -------------------------------- # Functions dealing with templates # -------------------------------- def extract_templates_and_params(text: str, remove_disabled_parts: bool = False, strip: bool = False) -> ETPType: """Return a list of templates found in text. Return value is a list of tuples. There is one tuple for each use of a template in the page, with the template title as the first entry and a dict of parameters as the second entry. Parameters are indexed by strings; as in MediaWiki, an unnamed parameter is given a parameter name with an integer value corresponding to its position among the unnamed parameters, and if this results multiple parameters with the same name only the last value provided will be returned. This uses the package :py:obj:`mwparserfromhell` or :py:obj:`wikitextparser` as MediaWiki markup parser. It is mandatory that one of them is installed. There are minor differences between the two implementations. The parser packages preserves whitespace in parameter names and values. If there are multiple numbered parameters in the wikitext for the same position, MediaWiki will only use the last parameter value. e.g. 
`{{a| foo | 2 <!-- --> = bar | baz }}` is `{{a|1=foo|2=baz}}` To replicate that behaviour, enable both `remove_disabled_parts` and `strip` parameters. :param text: The wikitext from which templates are extracted :param remove_disabled_parts: If enabled, remove disabled wikitext such as comments and pre. :param strip: If enabled, strip arguments and values of templates. :return: list of template name and params .. versionchanged:: 6.1 *wikitextparser* package is supported; either *wikitextparser* or *mwparserfromhell* is strictly recommended. """ def explicit(param): try: attr = param.showkey except AttributeError: attr = not param.positional return attr if remove_disabled_parts: text = removeDisabledParts(text) parser_name = wikitextparser.__name__ pywikibot.debug('Using {!r} wikitext parser'.format(parser_name)) result = [] parsed = wikitextparser.parse(text) if parser_name == 'wikitextparser': templates = parsed.templates arguments = 'arguments' else: templates = parsed.ifilter_templates( matches=lambda x: not x.name.lstrip().startswith('#'), recursive=True) arguments = 'params' for template in templates: params = OrderedDict() for param in getattr(template, arguments): value = str(param.value) # mwpfh needs upcast to str if strip: key = param.name.strip() if explicit(param): value = param.value.strip() else: value = str(param.value) else: key = str(param.name) params[key] = value result.append((template.name.strip(), params)) return result def extract_templates_and_params_regex_simple(text: str): """ Extract top-level templates with params using only a simple regex. This function uses only a single regex, and returns an entry for each template called at the top-level of the wikitext. Nested templates are included in the argument values of the top-level template. This method will incorrectly split arguments when an argument value contains a '|', such as {{template|a={{b|c}} }}. :param text: The wikitext from which templates are extracted :return: list of template name and params :rtype: list of tuple of name and OrderedDict """ result = [] for match in NESTED_TEMPLATE_REGEX.finditer(text): name, params = match.group(1), match.group(2) # Special case for {{a}} if params is None: params = [] else: params = params.split('|') numbered_param_identifiers = iter(range(1, len(params) + 1)) params = OrderedDict( arg.split('=', 1) if '=' in arg else (str(next(numbered_param_identifiers)), arg) for arg in params) result.append((name, params)) return result def glue_template_and_params(template_and_params) -> str: """Return wiki text of template glued from params. You can use items from extract_templates_and_params here to get an equivalent template wiki text (it may happen that the order of the params changes). """ template, params = template_and_params text = '' for items in params.items(): text += '|{}={}\n'.format(*items) return '{{{{{}\n{}}}}}'.format(template, text) # -------------------------- # Page parsing functionality # -------------------------- def does_text_contain_section(pagetext: str, section: str) -> bool: """ Determine whether the page text contains the given section title. It does not care whether a section string may contain spaces or underlines. Both will match. If a section parameter contains an internal link, it will match the section with or without a preceding colon which is required for a text link e.g. for categories and files. 
:param pagetext: The wikitext of a page :param section: a section of a page including wikitext markups """ # match preceding colon for text links section = re.sub(r'\\\[\\\[(\\?:)?', r'\[\[\:?', re.escape(section)) # match underscores and white spaces section = re.sub(r'\\?[ _]', '[ _]', section) m = re.search("=+[ ']*{}[ ']*=+".format(section), pagetext) return bool(m) def reformat_ISBNs(text: str, match_func) -> str: """Reformat ISBNs. :param text: text containing ISBNs :param match_func: function to reformat matched ISBNs :type match_func: callable :return: reformatted text """ isbnR = re.compile(r'(?<=ISBN )(?P<code>[\d\-]+[\dXx])') text = isbnR.sub(match_func, text) return text # --------------------------------------- # Time parsing functionality (Archivebot) # --------------------------------------- class tzoneFixedOffset(datetime.tzinfo): """ Class building tzinfo objects for fixed-offset time zones. :param offset: a number indicating fixed offset in minutes east from UTC :param name: a string with name of the timezone """ def __init__(self, offset: int, name: str) -> None: """Initializer.""" self.__offset = datetime.timedelta(minutes=offset) self.__name = name def utcoffset(self, dt): """Return the offset to UTC.""" return self.__offset def tzname(self, dt): """Return the name of the timezone.""" return self.__name def dst(self, dt): """Return no daylight savings time.""" return datetime.timedelta(0) def __repr__(self) -> str: """Return the internal representation of the timezone.""" return '{}({}, {})'.format( self.__class__.__name__, self.__offset.days * 86400 + self.__offset.seconds, self.__name ) class TimeStripper: """Find timestamp in page and return it as pywikibot.Timestamp object.""" def __init__(self, site=None) -> None: """Initializer.""" self.site = pywikibot.Site() if site is None else site self.origNames2monthNum = {} for n, (_long, _short) in enumerate(self.site.months_names, start=1): self.origNames2monthNum[_long] = n self.origNames2monthNum[_short] = n # in some cases month in ~~~~ might end without dot even if # site.months_names do not. if _short.endswith('.'): self.origNames2monthNum[_short[:-1]] = n self.groups = ['year', 'month', 'hour', 'time', 'day', 'minute', 'tzinfo'] timeR = (r'(?P<time>(?P<hour>([0-1]\d|2[0-3]))[:\.h]' r'(?P<minute>[0-5]\d))') timeznR = r'\((?P<tzinfo>[A-Z]+)\)' yearR = r'(?P<year>(19|20)\d\d)(?:{})?'.format('\ub144') # if months have 'digits' as names, they need to be # removed; will be handled as digits in regex, adding d+{1,2}\.? escaped_months = [_ for _ in self.origNames2monthNum if not _.strip('.').isdigit()] # match longest names first. escaped_months = [re.escape(_) for _ in sorted(escaped_months, reverse=True)] # work around for cs wiki: if month are in digits, we assume # that format is dd. mm. (with dot and spaces optional) # the last one is workaround for Korean if any(_.isdigit() for _ in self.origNames2monthNum): self.is_digit_month = True monthR = r'(?P<month>({})|(?:1[012]|0?[1-9])\.)' \ .format('|'.join(escaped_months)) dayR = r'(?P<day>(3[01]|[12]\d|0?[1-9]))(?:{})' \ r'?\.?\s*(?:[01]?\d\.)?'.format('\uc77c') else: self.is_digit_month = False monthR = r'(?P<month>({}))'.format('|'.join(escaped_months)) dayR = r'(?P<day>(3[01]|[12]\d|0?[1-9]))\.?' 
self.ptimeR = re.compile(timeR) self.ptimeznR = re.compile(timeznR) self.pyearR = re.compile(yearR) self.pmonthR = re.compile(monthR) self.pdayR = re.compile(dayR) # order is important to avoid mismatch when searching self.patterns = [ self.ptimeR, self.ptimeznR, self.pyearR, self.pmonthR, self.pdayR, ] self._hyperlink_pat = re.compile(r'\[\s*?http[s]?://[^\]]*?\]') self._comment_pat = re.compile(r'<!--(.*?)-->') self._wikilink_pat = re.compile( r'\[\[(?P<link>[^\]\|]*?)(?P<anchor>\|[^\]]*)?\]\]') self.tzinfo = tzoneFixedOffset(self.site.siteinfo['timeoffset'], self.site.siteinfo['timezone']) @staticmethod @deprecated('to_latin_digits() function', since='7.0.0') def fix_digits(line): """Make non-latin digits like Persian to latin to parse. .. deprecated:: 7.0 Use :func:`to_latin_digits` instead. """ return to_latin_digits(line) def _last_match_and_replace(self, txt: str, pat): """ Take the rightmost match and replace with marker. It does so to prevent spurious earlier matches. """ m = None cnt = 0 for m in pat.finditer(txt): cnt += 1 def marker(m): """ Replace exactly the same number of matched characters. Same number of chars shall be replaced, in order to be able to compare pos for matches reliably (absolute pos of a match is not altered by replacement). """ return '@' * (m.end() - m.start()) if m: # month and day format might be identical (e.g. see bug T71315), # avoid to wipe out day, after month is matched. # replace all matches but the last two # (i.e. allow to search for dd. mm.) if pat == self.pmonthR: if self.is_digit_month: if cnt > 2: txt = pat.sub(marker, txt, cnt - 2) else: txt = pat.sub(marker, txt) else: txt = pat.sub(marker, txt) return (txt, m) return (txt, None) @staticmethod def _valid_date_dict_positions(dateDict) -> bool: """Check consistency of reasonable positions for groups.""" time_pos = dateDict['time']['start'] tzinfo_pos = dateDict['tzinfo']['start'] date_pos = sorted( (dateDict['day'], dateDict['month'], dateDict['year']), key=lambda x: x['start']) min_pos, max_pos = date_pos[0]['start'], date_pos[-1]['start'] max_gap = max(x[1]['start'] - x[0]['end'] for x in zip(date_pos, date_pos[1:])) if max_gap > TIMESTAMP_GAP_LIMIT: return False if tzinfo_pos < min_pos or tzinfo_pos < time_pos: return False if min_pos < tzinfo_pos < max_pos: return False if min_pos < time_pos < max_pos: return False return True def timestripper(self, line): """ Find timestamp in line and convert it to time zone aware datetime. All the following items must be matched, otherwise None is returned: -. year, month, hour, time, day, minute, tzinfo :return: A timestamp found on the given line :rtype: pywikibot.Timestamp """ # Try to maintain gaps that are used in _valid_date_dict_positions() def censor_match(match): return '_' * (match.end() - match.start()) # match date fields dateDict = {} # Analyze comments separately from rest of each line to avoid to skip # dates in comments, as the date matched by timestripper is the # rightmost one. most_recent = [] for comment in self._comment_pat.finditer(line): # Recursion levels can be maximum two. If a comment is found, it # will not for sure be found in the next level. # Nested comments are excluded by design. timestamp = self.timestripper(comment.group(1)) most_recent.append(timestamp) # Censor comments. line = self._comment_pat.sub(censor_match, line) # Censor external links. line = self._hyperlink_pat.sub(censor_match, line) for wikilink in self._wikilink_pat.finditer(line): # Recursion levels can be maximum two. 
If a link is found, it will # not for sure be found in the next level. # Nested links are excluded by design. link, anchor = wikilink.group('link'), wikilink.group('anchor') timestamp = self.timestripper(link) most_recent.append(timestamp) if anchor: timestamp = self.timestripper(anchor) most_recent.append(timestamp) # Censor wikilinks. line = self._wikilink_pat.sub(censor_match, line) # Remove parts that are not supposed to contain the timestamp, in order # to reduce false positives. line = removeDisabledParts(line) line = to_latin_digits(line) for pat in self.patterns: line, match_obj = self._last_match_and_replace(line, pat) if match_obj: for group, value in match_obj.groupdict().items(): start, end = (match_obj.start(group), match_obj.end(group)) # The positions are stored for later validation dateDict[group] = { 'value': value, 'start': start, 'end': end } # all fields matched -> date valid # groups are in a reasonable order. if (all(g in dateDict for g in self.groups) and self._valid_date_dict_positions(dateDict)): # remove 'time' key, now split in hour/minute and not needed # by datetime. del dateDict['time'] # replace month name in original language with month number try: value = self.origNames2monthNum[dateDict['month']['value']] except KeyError: pywikibot.output('incorrect month name "{}" in page in site {}' .format(dateDict['month']['value'], self.site)) raise KeyError else: dateDict['month']['value'] = value # convert to integers and remove the inner dict for k, v in dateDict.items(): if k == 'tzinfo': continue try: dateDict[k] = int(v['value']) except ValueError: raise ValueError( 'Value: {} could not be converted for key: {}.' .format(v['value'], k)) # find timezone dateDict['tzinfo'] = self.tzinfo timestamp = pywikibot.Timestamp(**dateDict) else: timestamp = None most_recent.append(timestamp) try: timestamp = max(ts for ts in most_recent if ts is not None) except ValueError: timestamp = None return timestamp
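# ---------------------------------------------------------------------------
# A minimal usage sketch for the TimeStripper class above, assuming a
# configured pywikibot install with network access; the signed line is a
# made-up example.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    site = pywikibot.Site('en', 'wikipedia')
    stripper = TimeStripper(site)
    signed_line = 'Some talk page comment. -- Example 12:34, 5 January 2021 (UTC)'
    # Returns a timezone-aware pywikibot.Timestamp, or None if no complete
    # timestamp (year, month, day, time, tzinfo) is found on the line.
    print(stripper.timestripper(signed_line))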
#!/usr/bin/env python # -*- coding: utf-8 -*- import simplejson as json from alipay.aop.api.FileItem import FileItem from alipay.aop.api.constant.ParamConstants import * from alipay.aop.api.domain.AlipayPassTemplateAddModel import AlipayPassTemplateAddModel class AlipayPassTemplateAddRequest(object): def __init__(self, biz_model=None): self._biz_model = biz_model self._biz_content = None self._version = "1.0" self._terminal_type = None self._terminal_info = None self._prod_code = None self._notify_url = None self._return_url = None self._udf_params = None self._need_encrypt = False @property def biz_model(self): return self._biz_model @biz_model.setter def biz_model(self, value): self._biz_model = value @property def biz_content(self): return self._biz_content @biz_content.setter def biz_content(self, value): if isinstance(value, AlipayPassTemplateAddModel): self._biz_content = value else: self._biz_content = AlipayPassTemplateAddModel.from_alipay_dict(value) @property def version(self): return self._version @version.setter def version(self, value): self._version = value @property def terminal_type(self): return self._terminal_type @terminal_type.setter def terminal_type(self, value): self._terminal_type = value @property def terminal_info(self): return self._terminal_info @terminal_info.setter def terminal_info(self, value): self._terminal_info = value @property def prod_code(self): return self._prod_code @prod_code.setter def prod_code(self, value): self._prod_code = value @property def notify_url(self): return self._notify_url @notify_url.setter def notify_url(self, value): self._notify_url = value @property def return_url(self): return self._return_url @return_url.setter def return_url(self, value): self._return_url = value @property def udf_params(self): return self._udf_params @udf_params.setter def udf_params(self, value): if not isinstance(value, dict): return self._udf_params = value @property def need_encrypt(self): return self._need_encrypt @need_encrypt.setter def need_encrypt(self, value): self._need_encrypt = value def add_other_text_param(self, key, value): if not self.udf_params: self.udf_params = dict() self.udf_params[key] = value def get_params(self): params = dict() params[P_METHOD] = 'alipay.pass.template.add' params[P_VERSION] = self.version if self.biz_model: params[P_BIZ_CONTENT] = json.dumps(obj=self.biz_model.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':')) if self.biz_content: if hasattr(self.biz_content, 'to_alipay_dict'): params['biz_content'] = json.dumps(obj=self.biz_content.to_alipay_dict(), use_decimal=True, ensure_ascii=False, sort_keys=True, separators=(',', ':')) else: params['biz_content'] = self.biz_content if self.terminal_type: params['terminal_type'] = self.terminal_type if self.terminal_info: params['terminal_info'] = self.terminal_info if self.prod_code: params['prod_code'] = self.prod_code if self.notify_url: params['notify_url'] = self.notify_url if self.return_url: params['return_url'] = self.return_url if self.udf_params: params.update(self.udf_params) return params def get_multipart_params(self): multipart_params = dict() return multipart_params
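# ---------------------------------------------------------------------------
# Sketch: driving the request class above in the usual alipay-sdk-python
# style; the notify URL is a hypothetical placeholder, and no signing or
# HTTP transport is shown here.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    request = AlipayPassTemplateAddRequest(biz_model=AlipayPassTemplateAddModel())
    request.notify_url = 'https://example.com/alipay/notify'
    # get_params() assembles the signable parameter dict, including the
    # JSON-serialized biz model.
    params = request.get_params()
    print(params[P_METHOD])  # -> 'alipay.pass.template.add'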
import logging

import pytest

from ocs_ci.framework.pytest_customization.marks import tier1, skipif_no_kms
from ocs_ci.framework.testlib import MCGTest
from ocs_ci.ocs import constants, defaults
from ocs_ci.ocs.resources import pod

logger = logging.getLogger(__name__)


@skipif_no_kms
class TestNoobaaKMS(MCGTest):
    """
    Test KMS integration with NooBaa
    """

    @tier1
    @pytest.mark.polarion_id("OCS-2485")
    def test_noobaa_kms_validation(self):
        """
        Validate from the operator logs that NooBaa is successfully
        integrated with KMS.
        """
        operator_pod = pod.get_pods_having_label(
            label=constants.NOOBAA_OPERATOR_POD_LABEL,
            namespace=defaults.ROOK_CLUSTER_NAMESPACE,
        )[0]
        operator_logs = pod.get_pod_logs(pod_name=operator_pod["metadata"]["name"])
        assert "found root secret in external KMS successfully" in operator_logs
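# ---------------------------------------------------------------------------
# Ad-hoc sketch of the same validation outside the pytest harness, assuming
# a reachable cluster and the ocs_ci helpers imported above.
# ---------------------------------------------------------------------------
def noobaa_kms_enabled() -> bool:
    """Return True if the NooBaa operator log reports a usable external KMS."""
    operator_pod = pod.get_pods_having_label(
        label=constants.NOOBAA_OPERATOR_POD_LABEL,
        namespace=defaults.ROOK_CLUSTER_NAMESPACE,
    )[0]
    logs = pod.get_pod_logs(pod_name=operator_pod["metadata"]["name"])
    return "found root secret in external KMS successfully" in logs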
# mysql/mysqldb.py # Copyright (C) 2005-2021 the SQLAlchemy authors and contributors # <see AUTHORS file> # # This module is part of SQLAlchemy and is released under # the MIT License: http://www.opensource.org/licenses/mit-license.php """ .. dialect:: mysql+mysqldb :name: mysqlclient (maintained fork of MySQL-Python) :dbapi: mysqldb :connectstring: mysql+mysqldb://<user>:<password>@<host>[:<port>]/<dbname> :url: https://pypi.org/project/mysqlclient/ Driver Status ------------- The mysqlclient DBAPI is a maintained fork of the `MySQL-Python <http://sourceforge.net/projects/mysql-python>`_ DBAPI that is no longer maintained. `mysqlclient`_ supports Python 2 and Python 3 and is very stable. .. _mysqlclient: https://github.com/PyMySQL/mysqlclient-python .. _mysqldb_unicode: Unicode ------- Please see :ref:`mysql_unicode` for current recommendations on unicode handling. Using MySQLdb with Google Cloud SQL ----------------------------------- Google Cloud SQL now recommends use of the MySQLdb dialect. Connect using a URL like the following:: mysql+mysqldb://root@/<dbname>?unix_socket=/cloudsql/<projectid>:<instancename> Server Side Cursors ------------------- The mysqldb dialect supports server-side cursors. See :ref:`mysql_ss_cursors`. """ import re from .base import MySQLCompiler from .base import MySQLDialect from .base import MySQLExecutionContext from .base import MySQLIdentifierPreparer from .base import TEXT from ... import sql from ... import util class MySQLExecutionContext_mysqldb(MySQLExecutionContext): @property def rowcount(self): if hasattr(self, "_rowcount"): return self._rowcount else: return self.cursor.rowcount class MySQLCompiler_mysqldb(MySQLCompiler): pass class MySQLIdentifierPreparer_mysqldb(MySQLIdentifierPreparer): pass class MySQLDialect_mysqldb(MySQLDialect): driver = "mysqldb" supports_unicode_statements = True supports_sane_rowcount = True supports_sane_multi_rowcount = True supports_native_decimal = True default_paramstyle = "format" execution_ctx_cls = MySQLExecutionContext_mysqldb statement_compiler = MySQLCompiler_mysqldb preparer = MySQLIdentifierPreparer_mysqldb def __init__(self, **kwargs): super(MySQLDialect_mysqldb, self).__init__(**kwargs) self._mysql_dbapi_version = ( self._parse_dbapi_version(self.dbapi.__version__) if self.dbapi is not None and hasattr(self.dbapi, "__version__") else (0, 0, 0) ) def _parse_dbapi_version(self, version): m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", version) if m: return tuple(int(x) for x in m.group(1, 2, 3) if x is not None) else: return (0, 0, 0) @util.langhelpers.memoized_property def supports_server_side_cursors(self): try: cursors = __import__("MySQLdb.cursors").cursors self._sscursor = cursors.SSCursor return True except (ImportError, AttributeError): return False @classmethod def dbapi(cls): return __import__("MySQLdb") def on_connect(self): super_ = super(MySQLDialect_mysqldb, self).on_connect() def on_connect(conn): if super_ is not None: super_(conn) charset_name = conn.character_set_name() if charset_name is not None: cursor = conn.cursor() cursor.execute("SET NAMES %s" % charset_name) cursor.close() return on_connect def do_ping(self, dbapi_connection): try: dbapi_connection.ping(False) except self.dbapi.Error as err: if self.is_disconnect(err, dbapi_connection, None): return False else: raise else: return True def do_executemany(self, cursor, statement, parameters, context=None): rowcount = cursor.executemany(statement, parameters) if context is not None: context._rowcount = rowcount def 
_check_unicode_returns(self, connection): # work around issue fixed in # https://github.com/farcepest/MySQLdb1/commit/cd44524fef63bd3fcb71947392326e9742d520e8 # specific issue w/ the utf8mb4_bin collation and unicode returns collation = connection.exec_driver_sql( "show collation where %s = 'utf8mb4' and %s = 'utf8mb4_bin'" % ( self.identifier_preparer.quote("Charset"), self.identifier_preparer.quote("Collation"), ) ).scalar() has_utf8mb4_bin = self.server_version_info > (5,) and collation if has_utf8mb4_bin: additional_tests = [ sql.collate( sql.cast( sql.literal_column("'test collated returns'"), TEXT(charset="utf8mb4"), ), "utf8mb4_bin", ) ] else: additional_tests = [] return super(MySQLDialect_mysqldb, self)._check_unicode_returns( connection, additional_tests ) def create_connect_args(self, url, _translate_args=None): if _translate_args is None: _translate_args = dict( database="db", username="user", password="passwd" ) opts = url.translate_connect_args(**_translate_args) opts.update(url.query) util.coerce_kw_type(opts, "compress", bool) util.coerce_kw_type(opts, "connect_timeout", int) util.coerce_kw_type(opts, "read_timeout", int) util.coerce_kw_type(opts, "write_timeout", int) util.coerce_kw_type(opts, "client_flag", int) util.coerce_kw_type(opts, "local_infile", int) # Note: using either of the below will cause all strings to be # returned as Unicode, both in raw SQL operations and with column # types like String and MSString. util.coerce_kw_type(opts, "use_unicode", bool) util.coerce_kw_type(opts, "charset", str) # Rich values 'cursorclass' and 'conv' are not supported via # query string. ssl = {} keys = ["ssl_ca", "ssl_key", "ssl_cert", "ssl_capath", "ssl_cipher"] for key in keys: if key in opts: ssl[key[4:]] = opts[key] util.coerce_kw_type(ssl, key[4:], str) del opts[key] if ssl: opts["ssl"] = ssl # FOUND_ROWS must be set in CLIENT_FLAGS to enable # supports_sane_rowcount. client_flag = opts.get("client_flag", 0) client_flag_found_rows = self._found_rows_client_flag() if client_flag_found_rows is not None: client_flag |= client_flag_found_rows opts["client_flag"] = client_flag return [[], opts] def _found_rows_client_flag(self): if self.dbapi is not None: try: CLIENT_FLAGS = __import__( self.dbapi.__name__ + ".constants.CLIENT" ).constants.CLIENT except (AttributeError, ImportError): return None else: return CLIENT_FLAGS.FOUND_ROWS else: return None def _extract_error_code(self, exception): return exception.args[0] def _detect_charset(self, connection): """Sniff out the character set in use for connection results.""" try: # note: the SQL here would be # "SHOW VARIABLES LIKE 'character_set%%'" cset_name = connection.connection.character_set_name except AttributeError: util.warn( "No 'character_set_name' can be detected with " "this MySQL-Python version; " "please upgrade to a recent version of MySQL-Python. " "Assuming latin1." ) return "latin1" else: return cset_name() _isolation_lookup = set( [ "SERIALIZABLE", "READ UNCOMMITTED", "READ COMMITTED", "REPEATABLE READ", "AUTOCOMMIT", ] ) def _set_isolation_level(self, connection, level): if level == "AUTOCOMMIT": connection.autocommit(True) else: connection.autocommit(False) super(MySQLDialect_mysqldb, self)._set_isolation_level( connection, level ) dialect = MySQLDialect_mysqldb
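# ---------------------------------------------------------------------------
# Sketch: connecting through this dialect with SQLAlchemy, assuming the
# mysqlclient DBAPI is installed; the DSN below is a hypothetical example.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    from sqlalchemy import create_engine, text

    engine = create_engine("mysql+mysqldb://user:password@localhost/dbname")
    with engine.connect() as conn:
        # do_ping() and character-set detection above run as part of the
        # pool/dialect lifecycle; this simply verifies a round trip.
        print(conn.execute(text("SELECT VERSION()")).scalar())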
from abc import ABC, abstractmethod

from project.baked_food.baked_food import BakedFood
from project.core.validator import Validator
from project.drink.drink import Drink


class Table(ABC):
    def __init__(self, table_number: int, capacity: int):
        self.table_number = table_number
        self.capacity = capacity
        self.food_orders = []
        self.drink_orders = []
        self.number_of_people = 0
        self.is_reserved = False

    @property
    def table_number(self):
        return self.__table_number

    @table_number.setter
    def table_number(self, value):
        Validator.raise_if_number_is_not_in_range(
            value,
            self.min_table_number,
            self.max_table_number,
            self.table_number_error_message)
        self.__table_number = value

    @property
    def capacity(self):
        return self.__capacity

    @capacity.setter
    def capacity(self, value):
        Validator.raise_if_number_is_zero_or_negative(value, 'Capacity has to be greater than 0!')
        self.__capacity = value

    @property
    @abstractmethod
    def min_table_number(self):
        pass

    @property
    @abstractmethod
    def max_table_number(self):
        pass

    @property
    @abstractmethod
    def table_number_error_message(self):
        pass

    def reserve(self, number_of_people: int):
        self.is_reserved = True
        self.number_of_people = number_of_people

    def order_food(self, baked_food: BakedFood):
        self.food_orders.append(baked_food)

    def order_drink(self, drink: Drink):
        self.drink_orders.append(drink)

    def get_bill(self):
        return sum(f.price for f in self.food_orders) + sum(d.price for d in self.drink_orders)

    def clear(self):
        self.food_orders = []
        self.drink_orders = []
        self.number_of_people = 0
        self.is_reserved = False

    def free_table_info(self):
        if not self.is_reserved:
            return f'Table: {self.table_number}\n' + \
                   f'Type: {self.__class__.__name__}\n' + \
                   f'Capacity: {self.capacity}'
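# ---------------------------------------------------------------------------
# Sketch of a concrete subclass: Table is abstract, so the three abstract
# properties must be supplied before it can be instantiated. The 1-50 range
# and the message text are hypothetical example values.
# ---------------------------------------------------------------------------
class ExampleInsideTable(Table):
    @property
    def min_table_number(self):
        return 1

    @property
    def max_table_number(self):
        return 50

    @property
    def table_number_error_message(self):
        return 'Table number must be between 1 and 50 inclusive!'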
import serial

# Note for version one - using USB connection
# device = '/dev/ttyUSB0'
baud = 9600
device = '/dev/ttyS0'

# Use the configured baud rate instead of repeating the magic number.
ser = serial.Serial(device, baud)

while True:
    # readline() returns bytes; strip the trailing CR/LF before checking.
    line = ser.readline().strip()
    if line:
        print(line)
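# ---------------------------------------------------------------------------
# Drop-in variant of the loop above with a read timeout and automatic
# cleanup via pyserial's context-manager support; decoding is added so the
# output is text rather than a bytes repr. Replaces, not supplements, the
# loop above (the port can only be opened once).
# ---------------------------------------------------------------------------
import serial

with serial.Serial('/dev/ttyS0', 9600, timeout=1.0) as port:
    while True:
        raw = port.readline().strip()
        if raw:
            print(raw.decode('ascii', errors='replace'))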
import broker import re from stix2 import Indicator, Sighting from threatbus.data import ( Operation, Subscription, ThreatBusSTIX2Constants, Unsubscription, ) from threatbus.stix2_helpers import is_point_equality_ioc, split_object_path_and_value from typing import Union from urllib.parse import urlparse # See the documentation for the Zeek INTEL framework [1] and STIX-2 cyber # observable objects [2] # [1] https://docs.zeek.org/en/stable/scripts/base/frameworks/intel/main.zeek.html#type-Intel::Type # [2] https://docs.oasis-open.org/cti/stix/v2.1/cs01/stix-v2.1-cs01.html#_mlbmudhl16lr zeek_intel_type_map = { "domain-name:value": "DOMAIN", "email-addr:value": "EMAIL", "file:name": "FILE_NAME", "file:hashes.MD5": "FILE_HASH", "file:hashes.'SHA-1'": "FILE_HASH", "file:hashes.'SHA-256'": "FILE_HASH", "file:hashes.'SHA-512'": "FILE_HASH", "file:hashes.'SHA3-256'": "FILE_HASH", "file:hashes.'SHA3-512'": "FILE_HASH", "file:hashes.SSDEEP": "FILE_HASH", "file:hashes.TLSH": "FILE_HASH", "ipv4-addr:value": "ADDR", "ipv6-addr:value": "ADDR", "software:name": "SOFTWARE", "url:value": "URL", "user:user_id": "USER_NAME", "user:account_login": "USER_NAME", "x509-certificate:hashes.'SHA-1'": "CERT_HASH", # Zeek only supports SHA-1 } def map_management_message( broker_data, module_namespace: str, logger ) -> Union[Subscription, Unsubscription, None]: """ Maps a management message to an actionable instruction for Threat Bus. @param broker_data The raw data that was received via broker @param module_namespace A Zeek namespace to accept events from @return A Subscription/Unsubscription object or None in case there is no valid mapping. """ event = broker.zeek.Event(broker_data) name, args = event.name(), event.args() module_namespace = module_namespace + "::" if module_namespace else "" name = name[name.startswith(module_namespace) and len(module_namespace) :] if name == "subscribe" and len(args) == 2: (topic, snapshot_delta) = args if topic: return Subscription(topic, snapshot_delta) elif name == "unsubscribe" and len(args) == 1: topic = args[0] if topic: return Unsubscription(topic) logger.debug(f"Discarding Broker management message with unknown type: {name}") return None def map_broker_event_to_sighting(broker_data, module_namespace, logger): """ Maps a Broker message, based on the event name, to a STIX-2 indicator or STIX-2 Sighting. 
@param broker_data The raw data that was received via broker @param module_namespace A Zeek namespace to accept events from """ event = broker.zeek.Event(broker_data) name, args = event.name(), event.args() module_namespace = module_namespace + "::" if module_namespace else "" name = name[name.startswith(module_namespace) and len(module_namespace) :] if name != "sighting" or len(args) != 3: if logger: logger.debug(f"Discarding Broker event with unknown type: {name}") return None # convert args to STIX-2 sighting (timestamp, ioc_id, context) = args return Sighting( sighting_of_ref=str(ioc_id), last_seen=timestamp, custom_properties={ ThreatBusSTIX2Constants.X_THREATBUS_SIGHTING_CONTEXT.value: context }, ) def map_indicator_to_broker_event( indicator: Indicator, module_namespace: str, logger ) -> Union[broker.zeek.Event, None]: """ Maps STIX-2 Indicators to Broker events using the Zeek Intel format @see https://docs.zeek.org/en/current/scripts/base/frameworks/intel/main.zeek.html#type-Intel::Type @param indicator The STIX-2 Indicator to convert @param module_namespace A Zeek namespace to use for sending the event @return The mapped broker event or None """ if type(indicator) is not Indicator: logger.debug(f"Discarding message, expected STIX-2 Indicator: {indicator}") return None if not is_point_equality_ioc(indicator.pattern): logger.debug( f"Zeek only supports point-IoCs. Cannot map compound pattern to a Zeek Intel item: {indicator.pattern}" ) return None object_path, ioc_value = split_object_path_and_value(indicator.pattern) # get matching Zeek intel type zeek_type = zeek_intel_type_map.get(object_path, None) if not zeek_type: logger.debug( f"No matching Zeek type found for STIX-2 indicator type '{object_path}'" ) return None if zeek_type == "URL": # remove leading protocol, if any parsed = urlparse(ioc_value) scheme = f"{parsed.scheme}://" ioc_value = parsed.geturl().replace(scheme, "", 1) elif zeek_type == "ADDR" and re.match(".+/.+", ioc_value): # elevate to subnet if possible zeek_type = "SUBNET" operation = "ADD" ## Zeek operation to add a new Intel item if ( ThreatBusSTIX2Constants.X_THREATBUS_UPDATE.value in indicator and indicator.x_threatbus_update == Operation.REMOVE.value ): operation = "REMOVE" return broker.zeek.Event( f"{module_namespace}::intel", (indicator.created, str(indicator.id), zeek_type, ioc_value, operation), )
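# ---------------------------------------------------------------------------
# Sketch: converting a point IoC into a Zeek intel event with the mapper
# above. The pattern value and the "Tenzir" module namespace are example
# inputs, not values mandated by the plugin.
# ---------------------------------------------------------------------------
if __name__ == "__main__":
    import logging

    example = Indicator(
        pattern="[ipv4-addr:value = '198.51.100.7']",
        pattern_type="stix",
    )
    event = map_indicator_to_broker_event(
        example, "Tenzir", logging.getLogger(__name__)
    )
    # event is a broker.zeek.Event named "Tenzir::intel" carrying
    # (created, id, "ADDR", "198.51.100.7", "ADD").
    print(event)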
import os import re import json import typing import argparse def check_special_char(string: str) -> bool: """ Check if a string contains special characters. :param string: The input string. :return: True if there are special characters. """ # regex = re.compile('[@_!#$%^&*()<>?/\\|}{~:]') regex = re.compile('[@_!#$%^&*<>?|/\\}{~:]') if not regex.search(string): return False return True def parse_head(head_string: str) -> (str, str): """ Parse the `Title` section of README file and get the title and description. :param head_string: A string containing title, description and images. :return: Stripped title and description strings. """ parts = list(filter(bool, head_string.splitlines())) if len(parts) < 3: raise Exception('README description parse failure!') title = parts[0].lstrip('# ').rstrip() description = parts[1].strip() return title, description def parse_apis(apis_string: str) -> typing.List[str]: """ Parse the `Relevant API` section and get a list of APIs. :param apis_string: A string containing all APIs. :return: A sorted list of stripped API names. """ apis = list(filter(bool, apis_string.splitlines())) if not apis: raise Exception('README Relevant API parse failure!') return sorted([api.lstrip('*- ').rstrip() for api in apis]) def parse_tags(tags_string: str) -> typing.List[str]: """ Parse the `Tags` section and get a list of tags. :param tags_string: A string containing all tags, with comma or newline as delimiter. :return: A sorted list of stripped tags. """ tags = re.split(r'[,\n]', tags_string) if not tags: raise Exception('README Tags parse failure!') tags = [x for x in tags if x != ''] return sorted([tag.strip() for tag in tags]) def get_folder_name_from_path(path: str) -> str: """ Get the folder name from a full path. :param path: A string of a full/absolute path to a folder. :return: The folder name. """ return os.path.normpath(path).split(os.path.sep)[-1] class MetadataUpdater: def __init__(self, folder_path: str, single_update: bool = False): """ The standard format of metadata.json for Android platform. Read more at: https://devtopia.esri.com/runtime/common-samples/wiki/README.metadata.json """ self.category = '' # Populate from json. self.description = '' # Populate from README. self.formal_name = '' # Populate from README. self.ignore = False # Default to False. self.images = [] # Populate from folder paths. self.keywords = [] # Populate from README. self.language = '' # Populate from folder paths. self.provision_from = [] # Populate from json. self.provision_to = [] # Populate from json. self.redirect_from = [] # Populate from json. self.relevant_apis = [] # Populate from README. self.snippets = [] # Populate from folder paths. self.title = '' # Populate from README. self.folder_path = folder_path self.folder_name = get_folder_name_from_path(folder_path) self.readme_path = os.path.join(folder_path, 'README.md') self.json_path = os.path.join(folder_path, 'README.metadata.json') self.single_update = single_update def get_source_code_paths(self) -> typing.List[str]: """ Traverse the directory and get all filenames for source code. Ignores any code files in the `/build/` directory. :return: A list of java or kotlin source code filenames starting from `/src/`. 
""" results = [] for dp, dn, filenames in os.walk(self.folder_path): if ("/build/" not in dp): for file in filenames: extension = os.path.splitext(file)[1] if extension in ['.java'] or extension in ['.kt']: # get the programming language of the sample self.language = 'java' if extension in ['.java'] else 'kotlin' # get the snippet path snippet = os.path.join(dp, file) if snippet.startswith(self.folder_path): # add 1 to remove the leading slash snippet = snippet[len(self.folder_path):] results.append(snippet) if not results: raise Exception('Unable to get java/kotlin source code paths.') return sorted(results) def get_images_paths(self): """ Traverse the directory and get all filenames for images in the top level directory. :return: A list of image filenames. """ results = [] list_subfolders_with_paths = [f.name for f in os.scandir(self.folder_path) if f.is_file()] for file in list_subfolders_with_paths: if os.path.splitext(file)[1].lower() in ['.png']: results.append(file) if not results: raise Exception('Unable to get images paths.') return sorted(results) def populate_from_json(self) -> None: """ Read 'category', 'redirect_from', 'provision_to', and 'provision_from' fields from json, as they should not be changed. """ try: json_file = open(self.json_path, 'r') json_data = json.load(json_file) except Exception as err: print(f'Error reading JSON - {self.json_path} - {err}') raise err else: json_file.close() keys = json_data.keys() for key in ['category']: if key in keys: setattr(self, key, json_data[key]) if 'redirect_from' in keys: if isinstance(json_data['redirect_from'], str): self.redirect_from = [json_data['redirect_from']] elif isinstance(json_data['redirect_from'], typing.List): self.redirect_from = json_data['redirect_from'] else: print(f'No redirect_from in - {self.json_path}, abort.') if 'provision_from' in keys: if isinstance(json_data['provision_from'], str): self.provision_from = [json_data['provision_from']] elif isinstance(json_data['provision_from'], typing.List): self.provision_from = json_data['provision_from'] else: print(f'No provision_from in - {self.json_path}, abort.') if 'provision_to' in keys: if isinstance(json_data['provision_to'], str): self.provision_to = [json_data['provision_to']] elif isinstance(json_data['provision_to'], typing.List): self.provision_to = json_data['provision_to'] else: print(f'No provision_to in - {self.json_path}, abort.') def populate_from_readme(self) -> None: """ Read and parse the sections from README, and fill in the 'title', 'description', 'relevant_apis' and 'keywords' fields in the dictionary for output json. """ try: readme_file = open(self.readme_path, 'r') # read the readme content into a string readme_contents = readme_file.read() except Exception as err: print(f"Error reading README - {self.readme_path} - {err}.") raise err else: readme_file.close() # Use regex to split the README by exactly 2 pound marks, so that they # are separated into paragraphs. pattern = re.compile(r'^#{2}(?!#)\s(.*)', re.MULTILINE) readme_parts = re.split(pattern, readme_contents) try: api_section_index = readme_parts.index('Relevant API') + 1 tags_section_index = readme_parts.index('Tags') + 1 self.title, self.description = parse_head(readme_parts[0]) # create a formal name key from a pascal case version of the title # with parentheses removed. 
formal_name = ''.join(x for x in self.title.title() if not x.isspace())
            self.formal_name = re.sub('[()]', '', formal_name)
            if check_special_char(self.title + self.description):
                print(f'Info: special char in README - {self.folder_name}')
            self.relevant_apis = parse_apis(readme_parts[api_section_index])
            keywords = parse_tags(readme_parts[tags_section_index])
            # Do not include relevant apis in the keywords
            self.keywords = [w for w in keywords if w not in self.relevant_apis]
            # This is left in from the iOS script:
            # "It combines the Tags and the Relevant APIs in the README."
            # See /runtime/common-samples/wiki/README.metadata.json#keywords
            self.keywords += self.relevant_apis
        except Exception as err:
            print(f'Error parsing README - {self.readme_path} - {err}.')
            raise err

    def populate_from_paths(self) -> None:
        """
        Populate source code and image filenames from a sample's folder.
        """
        try:
            self.images = self.get_images_paths()
            self.snippets = self.get_source_code_paths()
        except Exception as err:
            print(f"Error parsing paths - {self.folder_name} - {err}.")
            raise err

    def flush_to_json(self, path_to_json: str) -> None:
        """
        Write the metadata to a json file.

        :param path_to_json: The path to the json file.
        """
        data = dict()
        if not self.category and self.single_update:
            data["category"] = "TODO"
        else:
            data["category"] = self.category
        data["description"] = self.description
        data["formal_name"] = self.formal_name
        data["ignore"] = self.ignore
        data["images"] = self.images
        data["keywords"] = self.keywords
        data["language"] = self.language
        if self.provision_from:
            data["provision_from"] = self.provision_from
        elif self.single_update:
            data["provision_from"] = "TODO"
        if self.provision_to:
            data["provision_to"] = self.provision_to
        elif self.single_update:
            data["provision_to"] = "TODO"
        # Use != rather than `is not`: identity comparison against a string
        # literal is unreliable and raises a SyntaxWarning.
        if self.redirect_from and self.redirect_from[0] != '':
            data["redirect_from"] = self.redirect_from
        elif self.single_update:
            data["redirect_from"] = "TODO"
        data["relevant_apis"] = self.relevant_apis
        data["snippets"] = self.snippets
        data["title"] = self.title
        with open(path_to_json, 'w+') as json_file:
            json.dump(data, json_file, indent=4, sort_keys=True)
            json_file.write('\n')


def update_1_sample(path: str):
    """
    Fixes 1 sample's metadata by running the script on a single sample's
    directory.
    """
    single_updater = MetadataUpdater(path, True)
    try:
        single_updater.populate_from_json()
        single_updater.populate_from_readme()
        single_updater.populate_from_paths()
    except Exception:
        print(f'Error populate failed for - {single_updater.folder_name}.')
        return
    single_updater.flush_to_json(os.path.join(path, 'README.metadata.json'))


def main():
    # Initialize parser.
    msg = 'Metadata helper script. Run it against the top level folder of an ' \
          'Android platform language (ie. kotlin or java) with the -m flag ' \
          'or against a single sample using the -s flag and passing in eg. ' \
          'kotlin/my-sample-dir'
    parser = argparse.ArgumentParser(description=msg)
    parser.add_argument('-m', '--multiple', help='input directory of the language')
    parser.add_argument('-s', '--single', help='input directory of the sample')
    args = parser.parse_args()
    if args.multiple:
        category_root_dir = args.multiple
        category_name = get_folder_name_from_path(category_root_dir)
        print(f'Processing category - `{category_name}`...')
        list_subfolders_with_paths = [
            f.path for f in os.scandir(category_root_dir) if f.is_dir()]
        for current_path in list_subfolders_with_paths:
            print(current_path)
            updater = MetadataUpdater(current_path)
            try:
                updater.populate_from_json()
                updater.populate_from_readme()
                updater.populate_from_paths()
            except Exception:
                print(f'Error populate failed for - {updater.folder_name}.')
                continue
            updater.flush_to_json(updater.json_path)
    elif args.single:
        update_1_sample(args.single)
    else:
        # update_1_sample() requires a path, so only report the bad input
        # instead of calling it without arguments.
        print('Invalid arguments, abort.')


if __name__ == '__main__':
    # Use main function for a full category.
    main()
    # Use test function for a single sample.
    # update_1_sample()
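# ---------------------------------------------------------------------------
# Example invocations, assuming the script is saved as a standalone file
# (the file name and sample paths below are hypothetical):
#
#   python update_metadata.py -m /path/to/java
#   python update_metadata.py -s /path/to/kotlin/my-sample-dir
# ---------------------------------------------------------------------------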
# input_lines = '''\
# (3x3)XYZ
# X(8x2)(3x3)ABCY
# (27x12)(20x12)(13x14)(7x10)(1x12)A
# (25x3)(3x3)ABC(2x3)XY(5x2)PQRSTX(18x9)(3x2)TWO(5x7)SEVEN'''.splitlines()
input_lines = open('input.txt')

# States of the marker scanner below.
NORMAL, MARKER, REPEAT = range(3)


def count_decompress(line):
    """Return the decompressed length of `line`, expanding (AxB) markers
    recursively without materializing the decompressed string."""
    result = 0
    chars = iter(line)
    state = NORMAL
    while True:
        char = next(chars, None)
        if char is None:
            break
        # Ignore all whitespace, including the trailing newline each line
        # carries when read from the file (skipping only ' ' over-counted
        # the result by one per line).
        if char.isspace():
            continue
        if state == NORMAL:
            if char == '(':
                size = ''
                repeat = ''
                state = MARKER
            else:
                result += 1
        elif state == MARKER:
            if char == 'x':
                state = REPEAT
            else:
                size += char
        elif state == REPEAT:
            if char == ')':
                pattern = ''.join(next(chars) for _ in range(int(size)))
                result += count_decompress(pattern) * int(repeat)
                state = NORMAL
            else:
                repeat += char
    return result


for line in input_lines:
    print(count_decompress(line))
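A minimal sanity check against the decompressed lengths given in the puzzle's own examples; if used, it belongs just above the input loop:

# Example strings and expected lengths from the puzzle statement.
assert count_decompress('(3x3)XYZ') == 9
assert count_decompress('X(8x2)(3x3)ABCY') == 20
assert count_decompress('(27x12)(20x12)(13x14)(7x10)(1x12)A') == 241920
assert count_decompress('(25x3)(3x3)ABC(2x3)XY(5x2)PQRSTX(18x9)(3x2)TWO(5x7)SEVEN') == 445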
from spira.core.typed_list import TypedList
from spira.core.transformable import Transformable
from spira.core.parameters.variables import FloatParameter
from spira.core.parameters.descriptor import ParameterDescriptor
from spira.core.parameters.restrictions import RestrictType
from spira.yevon.geometry.ports.base import __Port__


__all__ = ['PortList', 'PortListParameter']


class PortList(TypedList, Transformable):

    __item_type__ = __Port__

    # port_angle_decision = FloatParameter(default=0.0)
    port_angle_decision = FloatParameter(default=90.0)

    def __repr__(self):
        # Return (rather than print) a message for an empty list.
        if len(self._list) == 0:
            return 'PortList is empty'
        return '\n'.join('{}'.format(k) for k in enumerate(self._list))

    def __str__(self):
        return self.__repr__()

    def __getitem__(self, key):
        from spira.yevon.geometry.ports.base import __Port__
        if isinstance(key, int):
            return self._list[key]
        elif isinstance(key, str):
            for p in self._list:
                if p.name == key:
                    return p
            # Raise instead of silently falling through and returning None.
            raise KeyError('No port named {} in PortList'.format(key))
        elif issubclass(type(key), __Port__):
            for p in self._list:
                if p == key:
                    return p
        else:
            return self.get_port_from_label(key)

    def __contains__(self, item):
        for p in self._list:
            # if p.name == item.name:
            if p == item:
                return True
        return False

    def __delitem__(self, key):
        for i in range(0, len(self._list)):
            if self._list[i] is key:
                return list.__delitem__(self._list, i)

    def __sub__(self, other):
        pass

    def __or__(self, other):
        pass

    def union(self, other):
        # The dunder methods take a single `other` argument; the originals
        # passed `self` twice, which raised a TypeError at call time.
        return self.__or__(other)

    def intersection(self, other):
        return self.__and__(other)

    def difference(self, other):
        return self.__sub__(other)

    def update_layercopy(self, layer):
        P = self.__class__()
        for p in self._list:
            p.edgelayer = layer
            P.append(p)
        return P

    def flat_copy(self, level=-1):
        el = PortList()
        for e in self._list:
            el += e.flat_copy(level)
        return el

    def move(self, position):
        for c in self._list:
            c.move(position)
        return self

    def move_copy(self, position):
        T = self.__class__()
        for c in self._list:
            # NOTE: assuming ports expose move_copy (matching transform_copy);
            # the original called c.movecopy, which does not fit the naming
            # convention used elsewhere in this module.
            T.append(c.move_copy(position))
        return T

    def transform_copy(self, transformation):
        T = self.__class__()
        for c in self._list:
            T.append(c.transform_copy(transformation))
        return T

    def transform(self, transformation):
        for c in self._list:
            c.transform(transformation)
        return self

    def invert(self):
        for c in self._list:
            c.invert()
        return self

    def invert_copy(self):
        L = self.__class__()
        for c in self._list:
            # NOTE: assuming ports expose invert_copy; the original called
            # c.invertcopy (same naming-convention assumption as move_copy).
            L += c.invert_copy()
        return L

    def x_sorted(self):
        return self.__class__(sorted(self._list, key=lambda f: f.position[0]))

    def x_sorted_backward(self):
        return self.__class__(sorted(self._list, key=lambda f: (-f.position[0])))

    def y_sorted(self):
        return self.__class__(sorted(self._list, key=lambda f: f.position[1]))

    def y_sorted_backward(self):
        return self.__class__(sorted(self._list, key=lambda f: (-f.position[1])))

    def sorted_in_direction(self, direction):
        # NOTE: NORTH/SOUTH/EAST/WEST are not defined in this module; they
        # must be imported from the package's direction constants.
        if direction == NORTH:
            return self.y_sorted()
        elif direction == SOUTH:
            return self.y_sorted_backward()
        elif direction == EAST:
            return self.x_sorted()
        elif direction == WEST:
            return self.x_sorted_backward()
        else:
            raise AttributeError("Direction should be NORTH, EAST, SOUTH or WEST")

    def angle_sorted(self, reference_angle=0.0):
        """ Sorts ports by angle, using angles between the
        reference_angle and reference_angle + 360. """
        return self.__class__(sorted(self._list, key=lambda f: ((f.orientation - reference_angle) % 360.0)))

    def angle_sorted_backward(self, reference_angle=0.0):
        """ Sorts ports by angle, using angles between the
        reference_angle and reference_angle + 360. """
        return self.__class__(sorted(self._list, key=lambda f: (-(f.orientation - reference_angle) % 360.0)))

    def get_names(self):
        names = []
        for p in self._list:
            names.append(p.name)
        return names

    def get_ports_within_angles(self, start_angle, end_angle):
        pl = self.__class__()
        aspread = (end_angle - start_angle) % 360.0
        sa = start_angle % 360.0
        ea = sa + aspread
        for p in self._list:
            a = (p.orientation - sa) % 360.0
            if a <= aspread:
                pl.append(p)
        return pl

    def get_ports_on_process(self, process):
        pl = self.__class__()
        for p in self._list:
            if p.process == process:
                pl.append(p)
        return pl

    def get_ports_by_purpose(self, purpose):
        pl = self.__class__()
        for p in self._list:
            if p.purpose == purpose:
                pl.append(p)
        return pl

    def get_ports_by_type(self, port_type):
        pl = self.__class__()
        if port_type == 'D':
            for p in self._list:
                if p.name[0] == 'D':
                    # The original called pl.append() without an argument,
                    # which raised a TypeError.
                    pl.append(p)
        return pl

    @property
    def west_ports(self):
        start_angle = 180.0 - 0.5 * self.port_angle_decision
        end_angle = 180.0 + 0.5 * self.port_angle_decision
        return self.get_ports_within_angles(start_angle, end_angle)

    @property
    def east_ports(self):
        start_angle = -0.5 * self.port_angle_decision
        end_angle = +0.5 * self.port_angle_decision
        return self.get_ports_within_angles(start_angle, end_angle)

    @property
    def north_ports(self):
        start_angle = 90.0 - 0.5 * self.port_angle_decision
        end_angle = 90.0 + 0.5 * self.port_angle_decision
        return self.get_ports_within_angles(start_angle, end_angle)

    @property
    def south_ports(self):
        start_angle = 270.0 - 0.5 * self.port_angle_decision
        end_angle = 270.0 + 0.5 * self.port_angle_decision
        return self.get_ports_within_angles(start_angle, end_angle)

    @property
    def unlock(self):
        """ Unlock the edges and convert them to ports. """
        for i, p in enumerate(self._list):
            name = p.name.replace('E', 'P')
            self._list[i] = p.copy(name=name)
        return self


class PortListParameter(ParameterDescriptor):
    from spira.yevon.geometry.ports.port_list import PortList

    __type__ = PortList

    def __init__(self, default=[], **kwargs):
        kwargs['default'] = self.__type__(default)
        kwargs['restrictions'] = RestrictType([self.__type__])
        super().__init__(**kwargs)

    def __repr__(self):
        return ''

    def __str__(self):
        return ''

    def call_param_function(self, obj):
        f = self.get_param_function(obj)
        value = f(self.__type__())
        if value is None:
            value = self.__type__()
        self.__cache_parameter_value__(obj, value)
        new_value = self.__get_parameter_value__(obj)
        return new_value

    def __cache_parameter_value__(self, obj, ports):
        if isinstance(ports, self.__type__):
            super().__cache_parameter_value__(obj, ports)
        elif isinstance(ports, list):
            super().__cache_parameter_value__(obj, self.__type__(ports))
        else:
            raise TypeError("Invalid type in setting value of PortListParameter: " + str(type(ports)))

    def __set__(self, obj, ports):
        if isinstance(ports, self.__type__):
            self.__externally_set_parameter_value__(obj, ports)
        elif isinstance(ports, list):
            self.__externally_set_parameter_value__(obj, self.__type__(ports))
        else:
            raise TypeError("Invalid type in setting value of PortListParameter: " + str(type(ports)))
        return
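The directional properties above all reduce to the same modular-arithmetic window test; a standalone sketch of that predicate for reference (these names are illustrative, not part of spira):

def angle_in_window(angle, start_angle, end_angle):
    # True when `angle` lies in the window swept counter-clockwise
    # from start_angle to end_angle (all in degrees).
    spread = (end_angle - start_angle) % 360.0
    return (angle - start_angle) % 360.0 <= spread

# An east window of +/-45 degrees around 0:
assert angle_in_window(10.0, -45.0, 45.0)
assert not angle_in_window(180.0, -45.0, 45.0)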
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT from ....testing import assert_equal from ..model import SegStats def test_SegStats_inputs(): input_map = dict(annot=dict(argstr='--annot %s %s %s', mandatory=True, xor=(u'segmentation_file', u'annot', u'surf_label'), ), args=dict(argstr='%s', ), avgwf_file=dict(argstr='--avgwfvol %s', ), avgwf_txt_file=dict(argstr='--avgwf %s', ), brain_vol=dict(argstr='--%s', ), brainmask_file=dict(argstr='--brainmask %s', ), calc_power=dict(argstr='--%s', ), calc_snr=dict(argstr='--snr', ), color_table_file=dict(argstr='--ctab %s', xor=(u'color_table_file', u'default_color_table', u'gca_color_table'), ), cortex_vol_from_surf=dict(argstr='--surf-ctx-vol', ), default_color_table=dict(argstr='--ctab-default', xor=(u'color_table_file', u'default_color_table', u'gca_color_table'), ), empty=dict(argstr='--empty', ), environ=dict(nohash=True, usedefault=True, ), etiv=dict(argstr='--etiv', ), etiv_only=dict(), euler=dict(argstr='--euler', ), exclude_ctx_gm_wm=dict(argstr='--excl-ctxgmwm', ), exclude_id=dict(argstr='--excludeid %d', ), frame=dict(argstr='--frame %d', ), gca_color_table=dict(argstr='--ctab-gca %s', xor=(u'color_table_file', u'default_color_table', u'gca_color_table'), ), ignore_exception=dict(nohash=True, usedefault=True, ), in_file=dict(argstr='--i %s', ), in_intensity=dict(argstr='--in %s --in-intensity-name %s', ), intensity_units=dict(argstr='--in-intensity-units %s', requires=[u'in_intensity'], ), mask_erode=dict(argstr='--maskerode %d', ), mask_file=dict(argstr='--mask %s', ), mask_frame=dict(requires=[u'mask_file'], ), mask_invert=dict(argstr='--maskinvert', ), mask_sign=dict(), mask_thresh=dict(argstr='--maskthresh %f', ), multiply=dict(argstr='--mul %f', ), non_empty_only=dict(argstr='--nonempty', ), partial_volume_file=dict(argstr='--pv %s', ), segment_id=dict(argstr='--id %s...', ), segmentation_file=dict(argstr='--seg %s', mandatory=True, xor=(u'segmentation_file', u'annot', u'surf_label'), ), sf_avg_file=dict(argstr='--sfavg %s', ), subcort_gm=dict(argstr='--subcortgray', ), subjects_dir=dict(), summary_file=dict(argstr='--sum %s', genfile=True, position=-1, ), supratent=dict(argstr='--supratent', ), surf_label=dict(argstr='--slabel %s %s %s', mandatory=True, xor=(u'segmentation_file', u'annot', u'surf_label'), ), terminal_output=dict(nohash=True, ), total_gray=dict(argstr='--totalgray', ), vox=dict(argstr='--vox %s', ), wm_vol_from_surf=dict(argstr='--surf-wm-vol', ), ) inputs = SegStats.input_spec() for key, metadata in list(input_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(inputs.traits()[key], metakey), value def test_SegStats_outputs(): output_map = dict(avgwf_file=dict(), avgwf_txt_file=dict(), sf_avg_file=dict(), summary_file=dict(), ) outputs = SegStats.output_spec() for key, metadata in list(output_map.items()): for metakey, value in list(metadata.items()): yield assert_equal, getattr(outputs.traits()[key], metakey), value
import os import subprocess import sys from datetime import datetime, timedelta import pandas as pd import numpy as np def get_main_head_rev(repo_path): cdr = os.getcwd() os.chdir(repo_path) command = "git show-ref --heads -s" process = subprocess.Popen( command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) stdoutput, stderroutput = process.communicate() gitrev = stdoutput.decode("utf-8").strip() os.chdir(cdr) return gitrev # function to get period, start_date, and end_date tuple for period def get_period_range(start_year=2014, end_year=2017): period = 0 for ayear in range(start_year, end_year): # remember, december must have end date as jan 1st of next year for amonth in range(1, 13): period += 1 # start from period 0 and count start_date = datetime(ayear, amonth, 1) if amonth == 12: end_date = datetime(ayear + 1, 1, 1) else: end_date = datetime(ayear, amonth + 1, 1) yield (period, start_date, end_date) def count_contribs_in_git_log( start_date, end_date, no_merges=False, merges_only=False, ): # BUG: realted to how contribs are counted # we have a sliding window to asses if developer is first time contrib # but this puts the count for total contribs and committers off # need to have fix date for considering new contrib if no_merges: nmerges = " --no-merges" else: nmerges = "" if merges_only: mrgsonly = " --merges" else: mrgsonly = "" # we check a year back to see new contributors command = "git log --after='{start_date}' --before='{end_date}'{nmerges} "\ "{merges_only} --reverse --pretty='%aE\t%at\t%cE'".format( start_date=start_date - timedelta(days=365), end_date=end_date, nmerges=nmerges, merges_only=mrgsonly, ) # print("WORKING DIR IS:", os.getcwd()) # print(command) # debugging output = str(subprocess.check_output( command, shell=True), encoding="utf-8").strip() # print(cnt) # debugging data = [x.split("\t") for x in output.split("\n") if x] # print(data) # lets start counting contribs and committers new_contrib = [] new_commiters = [] contribs = {} commits = {} commits_others = {} all_contribs = {} all_commits = {} for auth, tm, comm in data: cdtime = datetime.fromtimestamp(int(tm)) if not auth: continue # find whoever is a first time contrib within our period if (cdtime >= start_date and auth not in all_contribs.keys() and auth not in all_commits.keys()): # print("adding new contrib:", auth) # print("add_contribs:", all_contribs) # print("add_commits:", all_commits) new_contrib.append((auth, tm)) # find whoever is a first time committer # and is committing work of others if (cdtime >= start_date and auth != comm and comm not in all_commits.keys()): new_commiters.append((comm, tm)) # find whoever is a committer (other) within our period # meaning, this is a committer that committed then # work of others (author != committer) if cdtime >= start_date and auth != comm: commits_others[comm] = commits_others.setdefault(comm, 0) + 1 # total count for the period if cdtime >= start_date: contribs[auth] = contribs.setdefault(auth, 0) + 1 commits[comm] = commits.setdefault(comm, 0) + 1 # in all commits to used to correctly find new contribs all_contribs[auth] = all_contribs.setdefault(auth, 0) + 1 all_commits[comm] = all_commits.setdefault(comm, 0) + 1 # we can calculate work distribution for committers and contribs # we need max, min, mean, median, and sd # do we need name of max contributor or committer? # maybe their percentage? 
total_sum_contribs = sum(contribs.values()) total_sum_commits = sum(commits.values()) total_sum_commits_for_others = sum(commits_others.values()) if total_sum_contribs: contrib_ratios = sorted( [x / total_sum_contribs for x in contribs.values()]) else: contrib_ratios = [0] if total_sum_commits: commit_ratios = sorted( [x / total_sum_commits for x in commits.values()]) else: commit_ratios = [0] if total_sum_commits_for_others: commit_for_others_ratios = sorted([x / total_sum_commits_for_others for x in commits_others.values()]) else: commit_for_others_ratios = [0] contribs_values = list(contribs.values()) commits_values = list(commits.values()) commits_others_values = list(commits_others.values()) # print(contribs) return { "new_contribs": len(new_contrib), "new_commiters": len(new_commiters), "total_contribs": len(contribs.keys()), "committers_for_others": len(commits_others.keys()), "total_committers": len(commits.keys()), "mean_contribs": (contribs_values or None) and np.mean(contribs_values), "median_contribs": (contribs_values or None) and np.median(contribs_values), "std_contribs": (contribs_values or None) and np.std(contribs_values), "max_contribs": (contribs_values or None) and np.max(contribs_values), "min_contribs": (contribs_values or None) and np.min(contribs_values), "top_conrib_ratio": contrib_ratios[-1], "bottom_conrib_ratio": contrib_ratios[0], "mean_contrib_ratio": (contrib_ratios or None) and np.mean(contrib_ratios), "median_contrib_ratio": (contrib_ratios or None) and np.median(contrib_ratios), "conrib_ratio": contrib_ratios, "mean_commits": (commits_values or None) and np.mean(commits_values), "median_commits": (commits_values or None) and np.median(commits_values), "std_commits": (commits_values or None) and np.std(commits_values), "max_commits": (commits_values or None) and np.max(commits_values), "min_commits": (commits_values or None) and np.min(commits_values), "top_commits_ratio": commit_ratios[-1], "bottom_commits_ratio": commit_ratios[0], "mean_commits_ratio": (commit_ratios or None) and np.mean(commit_ratios), "median_commits_ratio": (commit_ratios or None) and np.median(commit_ratios), "commits_ratio": commit_ratios, "mean_commits_others": (commits_others_values or None) and np.mean(commits_others_values), "median_commits_others": (commits_others_values or None) and np.median(commits_others_values), "std_commits_others": (commits_others_values or None) and np.std(commits_others_values), "max_commits_others": (commits_others_values or None) and np.max(commits_others_values), "min_commits_others": (commits_others_values or None) and np.min(commits_others_values), "top_commits_others_ratio": commit_for_others_ratios[-1], "bottom_commits_others_ratio": commit_for_others_ratios[0], "mean_commit_for_others_ratios": (commit_for_others_ratios or None) and np.mean(commit_for_others_ratios), "median_commit_for_others_ratios": (commit_for_others_ratios or None) and np.median(commit_for_others_ratios), "commit_for_others_ratios": commit_for_others_ratios, } def count_occurance_in_git_log( grep, start_date, end_date, invert_grep=False, no_merges=False, merges_only=False, is_perl_regex=False, ): invertg = "" pregex = "" if invert_grep: invertg = " --invert-grep" if is_perl_regex: pregex = " --perl-regexp" if no_merges: nmerges = " --no-merges" else: nmerges = "" if merges_only: mrgsonly = " --merges" else: mrgsonly = "" command = "git log --after='{start_date}' --before='{end_date}'{nmerges} "\ "{merges_only} --pretty='%h' -i --grep='{grep}'{invertg}{pregex} | wc 
-l".format( start_date=start_date, end_date=end_date, grep=grep, invertg=invertg, nmerges=nmerges, merges_only=mrgsonly, pregex=pregex, ) # print("WORKING DIR IS:", os.getcwd()) # print(command) # debugging cnt = subprocess.check_output(command, shell=True) # print(cnt) # debugging return int(cnt.strip()) def list_git_log_revs( grep, start_date, end_date, invert_grep=False, no_merges=False, merges_only=False, is_perl_regex=False): invertg = "" pregex = "" if invert_grep: invertg = " --invert-grep" if is_perl_regex: pregex = " --perl-regexp" if no_merges: nmerges = " --no-merges" else: nmerges = "" if merges_only: mrgsonly = " --merges" else: mrgsonly = "" command = "git log --after='{start_date}' --before='{end_date}'{nmerges} "\ "{mrgsonly} --pretty='%H' -i --grep='{grep}'{invertg}{pregex}".format( start_date=start_date, end_date=end_date, grep=grep, invertg=invertg, nmerges=nmerges, mrgsonly=mrgsonly, pregex=pregex, ) # print(command) lst = str(subprocess.check_output(command, shell=True), "utf-8") # print(cnt) return [x.strip() for x in lst.split("\n") if x] def get_stats_from_diff(r1, r2): values = [0, 0, 0] command = "git diff {} {} --shortstat".format(r1, r2) try: output = str(subprocess.check_output(command, shell=True), "utf-8") except: print("problem in diff {}, {} calling process".format(r1, r2)) return values output = output.split(",") for item in output: try: val = int(item.strip().split()[0]) if "file" in item: values[0] += val elif "insert" in item: values[1] += val elif "delet" in item: values[2] += val else: print("got an unknown item in:", output) except: print("problem in diff {}, {} item:".format(r1, r2), output) return values def get_first_commit_timestamp(): command = "git rev-list --max-parents=0 HEAD| xargs git log --pretty='%at'" output = str(subprocess.check_output(command, shell=True), "utf-8") revs = [x for x in output.split("\n") if x] return datetime.fromtimestamp(int(revs[-1])) def get_tags_and_dates(): command = "git tag" tags = str(subprocess.check_output( command, shell=True), "utf-8").split("\n") command = "git tag | xargs -L 1 git log --pretty='%at' -1" timestamps = str(subprocess.check_output( command, shell=True), "utf-8").split("\n") dates = [datetime.fromtimestamp(int(x)).date() for x in timestamps if x] return list(zip(tags, dates)) def get_tags_between_dates(start_date, end_date, tags_dates): return [ t for t, d in tags_dates if start_date <= d < end_date ] def get_git_stats_for_project(full_name, start_year, # inclusive end_year, # non-inclusive repos_dir="./repos" ): prid_name = full_name.replace("/", "_") repo_path = os.path.join(repos_dir, prid_name) # correct_dates = [date.strftime("%Y-%m-%d") for date in dates] # switch to head revision get_main_head_rev(repo_path) # switch to revision in desired date header = [ "full_name", "date", "end_date", "period", "total_revs", "first_rev", "last_rev", "new_contribs", "new_committers", "total_contribs", "committers_for_others", "total_committers", "mean_contribs", "median_contribs", "std_contribs", "max_contribs", "min_contribs", "top_conrib_ratio", "bottom_conrib_ratio", "conrib_ratio", "mean_commits", "median_commits", "std_commits", "max_commits", "min_commits", "top_commits_ratio", "bottom_commits_ratio", "commits_ratio", "mean_commits_others", "median_commits_others", "std_commits_others", "max_commits_others", "min_commits_others", "top_commits_others_ratio", "bottom_commits_others_ratio", "commit_for_others_ratios", "initial_commit_date", "age_days", "releases", "no_releases", "refactors", 
"fixes_can_be_doc", "not_fixes_can_be_doc", "fixes_and_doc", "fixes_no_doc", "docs", "commits_no_merge", "merges", "total_commits", "files_changed_churn", "loc_added_churn", "loc_removed_churn", "files_changed_delta", "loc_added_delta", "loc_removed_delta", ] # This is a special function that uses git # so we have to change working dir cdr = os.getcwd() os.chdir(repo_path) tags_n_dates = get_tags_and_dates() data = [] for period, start_date, end_date in get_period_range(start_year, end_year): rev_list = list_git_log_revs("", start_date, end_date) total_items = len(rev_list) if not total_items: print("no revisions between {} and {}".format( start_date, end_date)) continue print("working on period {}, start: {}, end: {}".format( period, start_date, end_date)) data_row = [] data_row.append(full_name) data_row.append(start_date) data_row.append(end_date) data_row.append(period) # revision data data_row.append(total_items) data_row.append(rev_list[0]) data_row.append(rev_list[-1]) # committers and contribs com_data = count_contribs_in_git_log(start_date, end_date) data_row.append(com_data["new_contribs"]) data_row.append(com_data["new_commiters"]) data_row.append(com_data["total_contribs"]) data_row.append(com_data["committers_for_others"]) data_row.append(com_data["total_committers"]) data_row.append(com_data["mean_contribs"]) data_row.append(com_data["median_contribs"]) data_row.append(com_data["std_contribs"]) data_row.append(com_data["max_contribs"]) data_row.append(com_data["min_contribs"]) data_row.append(com_data["top_conrib_ratio"]) data_row.append(com_data["bottom_conrib_ratio"]) data_row.append(com_data["conrib_ratio"]) data_row.append(com_data["mean_commits"]) data_row.append(com_data["median_commits"]) data_row.append(com_data["std_commits"]) data_row.append(com_data["max_commits"]) data_row.append(com_data["min_commits"]) data_row.append(com_data["top_commits_ratio"]) data_row.append(com_data["bottom_commits_ratio"]) data_row.append(com_data["commits_ratio"]) data_row.append(com_data["mean_commits_others"]) data_row.append(com_data["median_commits_others"]) data_row.append(com_data["std_commits_others"]) data_row.append(com_data["max_commits_others"]) data_row.append(com_data["min_commits_others"]) data_row.append(com_data["top_commits_others_ratio"]) data_row.append(com_data["bottom_commits_others_ratio"]) data_row.append(com_data["commit_for_others_ratios"]) # releases and age init_commit_date = get_first_commit_timestamp().date() data_row.append(init_commit_date) data_row.append((datetime.now().date() - init_commit_date).days) # releases and no_releases tags = get_tags_between_dates( start_date.date(), end_date.date(), tags_n_dates) data_row.append(",".join(tags)) data_row.append(len(tags)) # execute gitstat process refactors = count_occurance_in_git_log( "refactor", start_date, end_date, no_merges=True) fixes_can_be_doc = count_occurance_in_git_log( "(bug|fix|resolv)", start_date, end_date, is_perl_regex=True, no_merges=True) not_fixes_can_be_doc = count_occurance_in_git_log( "(bug|fix|resolv)", start_date, end_date, is_perl_regex=True, invert_grep=True, no_merges=True) fixes_and_doc = count_occurance_in_git_log( "(?=.*?doc)(?=.*?(bug|fix|resolv))", start_date, end_date, is_perl_regex=True, no_merges=True) fixes_no_doc = fixes_can_be_doc - fixes_and_doc docs = count_occurance_in_git_log("doc", start_date, end_date, no_merges=True) commits_no_merge = fixes_can_be_doc + not_fixes_can_be_doc merges = count_occurance_in_git_log( "", start_date, end_date, no_merges=False, 
merges_only=True) total_commits = commits_no_merge + merges data_row.extend([ refactors, fixes_can_be_doc, not_fixes_can_be_doc, fixes_and_doc, fixes_no_doc, docs, commits_no_merge, merges, total_commits ]) # calculate churn data # rev_list = list_git_log_revs("", start_date, end_date) # total_items = len(rev_list) churn = [0, 0, 0] deltas = [0, 0, 0] if total_items: left_revs = rev_list[:-1] # add the rev before the first rev left_revs.insert(0, "{}^1".format(rev_list[0])) right_revs = rev_list[:] revs = zip(left_revs, right_revs) # print("{} period: {} total items ".format( # project, period, total_items)) # calculate deltas here deltas = get_stats_from_diff(rev_list[0], rev_list[-1]) # print("deltas: ", deltas) # calculate churn here for i, rs in enumerate(revs): from operator import add churn = list( map(add, churn, get_stats_from_diff(rs[0], rs[1]))) data_row.extend(churn) data_row.extend(deltas) # print("appending: ", data_row) data.append(data_row) # must switch back to working dir os.chdir(cdr) return pd.DataFrame(data, columns=header) if __name__ == '__main__': print("==== DF:\n", get_git_stats_for_project( "tensorflow/tensorflow", 2019, 2020 ).head().T)
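As a quick illustration of the month-long windows get_period_range yields (December deliberately ends on January 1st of the following year):

for period, start, end in list(get_period_range(2014, 2015))[:3]:
    print(period, start.date(), end.date())
# 1 2014-01-01 2014-02-01
# 2 2014-02-01 2014-03-01
# 3 2014-03-01 2014-04-01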
"""django_books URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/2.2/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: path('', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: path('', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.urls import include, path 2. Add a URL to urlpatterns: path('blog/', include('blog.urls')) """ from django.contrib import admin from django.urls import path, include from django.views.generic import RedirectView urlpatterns = [ path('', RedirectView.as_view(permanent=False, url='/books/')), path('admin/', admin.site.urls), path('books/', include('books.urls')), path('api/v1/', include('api.urls', namespace='api')), path('select2/', include('django_select2.urls')), ]
import pytest import torch import torch_optimizer as optim def assert_sparse_not_supported(optimizer_class, err_msg=None): param = torch.randn(1, 1).to_sparse().requires_grad_(True) grad = torch.randn(1, 1).to_sparse() param.grad = grad optimizer = optimizer_class([param]) optimizer.zero_grad() with pytest.raises(RuntimeError) as ctx: optimizer.step() msg = err_msg or 'does not support sparse gradients' assert msg in str(ctx.value) no_sparse_optimizers = [ optim.AdaBound, optim.AdaMod, optim.DiffGrad, optim.Lamb, optim.NovoGrad, optim.Yogi, ] @pytest.mark.parametrize('optimizer_class', no_sparse_optimizers) def test_sparse_not_supported(optimizer_class): assert_sparse_not_supported(optimizer_class) optimizers = [ optim.AccSGD, optim.AdaBelief, optim.AdaBound, optim.AdaMod, optim.AdamP, optim.AggMo, optim.Apollo, optim.DiffGrad, optim.LARS, optim.Lamb, optim.MADGRAD, optim.NovoGrad, optim.PID, optim.QHAdam, optim.QHM, optim.SGDP, optim.SGDW, optim.SWATS, optim.Shampoo, optim.Yogi, ] @pytest.mark.parametrize('optimizer_class', optimizers) def test_learning_rate(optimizer_class): lr = -0.01 with pytest.raises(ValueError) as ctx: optimizer_class(None, lr=-0.01) msg = 'Invalid learning rate: {}'.format(lr) assert msg in str(ctx.value) eps_optimizers = [ optim.AdaBelief, optim.AdaBound, optim.AdaMod, optim.AdamP, optim.Apollo, optim.DiffGrad, optim.LARS, optim.Lamb, optim.MADGRAD, optim.NovoGrad, optim.QHAdam, optim.SGDP, optim.SWATS, optim.Yogi, ] @pytest.mark.parametrize('optimizer_class', eps_optimizers) def test_eps_validation(optimizer_class): eps = -0.1 with pytest.raises(ValueError) as ctx: optimizer_class(None, lr=0.1, eps=eps) msg = 'Invalid epsilon value: {}'.format(eps) assert msg in str(ctx.value) weight_decay_optimizers = [ optim.AccSGD, optim.AdaBelief, optim.AdaBound, optim.AdaMod, optim.Adafactor, optim.AdamP, optim.AggMo, optim.Apollo, optim.DiffGrad, optim.LARS, optim.Lamb, optim.MADGRAD, optim.NovoGrad, optim.PID, optim.QHAdam, optim.QHM, optim.SGDP, optim.SGDW, optim.SWATS, optim.Shampoo, optim.Yogi, ] @pytest.mark.parametrize('optimizer_class', weight_decay_optimizers) def test_weight_decay_validation(optimizer_class): weight_decay = -0.1 with pytest.raises(ValueError) as ctx: optimizer_class(None, lr=0.1, weight_decay=weight_decay) msg = 'Invalid weight_decay value: {}'.format(weight_decay) assert msg in str(ctx.value) betas_optimizers = [ optim.AdaBelief, optim.AdaBound, optim.AdaMod, optim.AdamP, optim.DiffGrad, optim.Lamb, optim.NovoGrad, optim.QHAdam, optim.Yogi, ] @pytest.mark.parametrize('optimizer_class', betas_optimizers) def test_betas_validation(optimizer_class): betas = (-1, 0.999) with pytest.raises(ValueError) as ctx: optimizer_class(None, lr=0.1, betas=(-1, 0.999)) msg = 'Invalid beta parameter at index 0: {}'.format(betas[0]) assert msg in str(ctx.value) betas = (0.9, -0.999) with pytest.raises(ValueError) as ctx: optimizer_class(None, lr=0.1, betas=betas) msg = 'Invalid beta parameter at index 1: {}'.format(betas[1]) assert msg in str(ctx.value)
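For context, each ValueError these tests expect is raised by the optimizer constructors themselves; a quick manual reproduction (AdaBound is an arbitrary pick from the lists above):

import torch_optimizer as optim

try:
    optim.AdaBound(None, lr=-0.01)
except ValueError as err:
    print(err)  # Invalid learning rate: -0.01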
# Scientific Library from pandas import HDFStore # Standard Library import json from pathlib import Path import warnings # Third Party import pyarrow as pa import pyarrow.dataset as ds import pyarrow.parquet as pq from tqdm.auto import tqdm # First Party from metadamage import utils class Parquet: def __init__(self, filename): self.filename = Path(filename) self.custom_meta_key = "metadamage" def __str__(self): return f"Parquet file: '{self.filename}'" def __repr__(self): return f"Parquet('{self.filename}')" def load_metadata(self): schema = pq.read_schema(self.filename) metadata_json = schema.metadata[self.custom_meta_key.encode()] metadata = json.loads(metadata_json) return metadata def _load_table(self, shortname=None, tax_id=None, columns=None): filename = self.filename if shortname is not None: filename = filename / f"{shortname}.parquet" if tax_id is None: filters = None else: filters = [("tax_id", "==", tax_id)] if isinstance(columns, str): columns = [columns] table = pq.read_table(filename, filters=filters, columns=columns) return table def _table_to_pandas(self, table): df = table.to_pandas() if "tax_id" in df.columns: df = df.astype({"tax_id": "category"}) return df def load(self, shortname=None, tax_id=None, columns=None): table = self._load_table(shortname, tax_id=tax_id, columns=columns) df = self._table_to_pandas(table) return df def _add_metadata_to_table(self, table, metadata): if metadata is None: metadata = {} custom_meta_json = json.dumps(metadata) updated_metadata = { self.custom_meta_key.encode(): custom_meta_json.encode(), **table.schema.metadata, } return table.replace_schema_metadata(updated_metadata) def _df_to_table_with_metadata(self, df, metadata): table = pa.Table.from_pandas(df) table = self._add_metadata_to_table(table, metadata) return table def save(self, df, metadata=None): utils.init_parent_folder(self.filename) table = self._df_to_table_with_metadata(df, metadata) # pq.write_to_dataset(table, self.filename, partition_cols=partition_cols) pq.write_table(table, self.filename, version="2.0") # def append(self, df, metadata=None, forced=False): # table = self._df_to_table_with_metadata(df, metadata) # writer = pq.ParquetWriter(self.filename, table.schema) # writer.write_table(table=table) def exists(self, forced=False): return self.filename.exists() and not forced class HDF5: def load(self, filename, key): with HDFStore(filename, mode="r") as hdf: df = hdf.select(key) metadata = hdf.get_storer(key).attrs.metadata return df, metadata def load_multiple_keys(self, filename, keys): all_dfs = [] with HDFStore(filename, mode="r") as hdf: for key in tqdm(keys): df_tmp = hdf.select(key) all_dfs.append(df_tmp) # metadata = hdf.get_storer(key).attrs.metadata return all_dfs def save(self, df, filename, key, metadata=None): utils.init_parent_folder(filename) if metadata is None: metadata = {} with warnings.catch_warnings(): message = "object name is not a valid Python identifier" warnings.filterwarnings("ignore", message=message) with HDFStore(filename, mode="a") as hdf: hdf.append(key, df, format="table", data_columns=True) hdf.get_storer(key).attrs.metadata = metadata def get_keys(self, filename): with HDFStore(filename, mode="r") as hdf: keys = list(set(hdf.keys())) # remove meta keys keys = sorted([key for key in keys if not "/meta/" in key]) return keys #%% # from tqdm.auto import tqdm # filename_hdf5 = "./data/out/hdf5_test.hdf5" # keys_hdf5 = IO_HDF5().get_keys(filename_hdf5) # # all_df = [] # # for key in tqdm(keys_hdf5): # # df_tmp, metadata = IO_HDF5().load( # 
# filename=filename_hdf5, key="counts/KapK-12-1-24-Ext-1-Lib-1-Index2" # # ) # # all_df.append(df_tmp) # all_df = IO_HDF5().load_multiple_keys(filename=filename_hdf5, keys=keys_hdf5) # df_counts1 = pd.concat(all_df, axis="index", ignore_index=True) # def concatenate(dfs, **kwargs): # """Concatenate while preserving categorical columns. # NB: We change the categories in-place for the input dataframes""" # from pandas.api.types import union_categoricals # import pandas as pd # # Iterate on categorical columns common to all dfs # for col in set.intersection( # *[set(df.select_dtypes(include="category").columns) for df in dfs] # ): # # Generate the union category across dfs for this column # uc = union_categoricals([df[col] for df in dfs]) # # Change to union category for all dataframes # for df in dfs: # df[col] = pd.Categorical(df[col].values, categories=uc.categories) # return pd.concat(dfs, **kwargs) # df_counts2 = concatenate(all_df, axis="index", ignore_index=True) # df_counts1.memory_usage(deep=True) / 1e6 # df_counts1.memory_usage(deep=True).sum() / 1e6 # df_counts2.memory_usage(deep=True) / 1e6 # df_counts2.memory_usage(deep=True).sum() / 1e6 # IO_HDF5().save( # df=df_counts2, # filename=filename_hdf5, # key="counts_combined", # ) # df_counts_hdf5, metadata_hdf5 = IO_HDF5().load( # filename=filename_hdf5, key="counts_combined" # ) # df_counts_hdf5.memory_usage(deep=True) / 1e6 # df_counts_hdf5.memory_usage(deep=True).sum() / 1e6 # df_counts_parquet = IO_Parquet().load("./data/out/parquet_test") # df_counts_parquet.memory_usage(deep=True) / 1e6 # df_counts_parquet.memory_usage(deep=True).sum() / 1e6 # df_counts_parquet.dtypes # # %timeit IO_HDF5().load(filename=filename_hdf5, key="counts_combined") # # %timeit IO_Parquet().load("./data/out/parquet_test") # filename = # pq.read_table("./data/out/parquet_test", filters=[('shortname', '=', "EC-Ext-14-Lib-14-Index1")]) # pq.read_table("./data/out/parquet_test", filters=[('shortname', '=', "EC-Ext-14-Lib-14-Index1")]).to_pandas() # %timeit pq.read_table("./data/out/parquet_test", filters=[('shortname', '=', "EC-Ext-14-Lib-14-Index1")]) # %timeit pq.read_table("./data/out/parquet_test/EC-Ext-14-Lib-14-Index1.parquet") # filename = "./data/out/pq_test.parquet" # metadata = cfg.to_dict() # table = pa.Table.from_pandas(df) # table = IO_Parquet()._add_metadata_to_table(table, metadata) # pq.write_table(table, filename, version="2.0") # pq.read_schema(filename) # pq.read_table(filename) # pq.read_table(filename) # pq.read_table(filename, read_dictionary=["tax_name"]) # pd.read_parquet(filename).dtypes # df_counts_parquet, metadata_parquet = IO_Parquet().load( # "./data/out/parquet_test/Lok-75-Sample-4b-Ext-A26-Lib26A-Index1.parquet" # ) # # # df.dtypes # IO_Parquet().save( # filename=f"./data/out/{cfg.shortname}.parquet", # df=df, # metadata=cfg.to_dict(), # ) # df_counts_parquet, metadata_parquet = IO_Parquet().load( # "./data/out/KapK-198A-Ext-55-Lib-55-Index1.parquet" # ) # df_counts_parquet.dtypes # # %timeit IO_HDF5().load(filename=filename_hdf5, key="counts_combined") # # df3 = pd.read_parquet( # # path="./data/out/counts", # # engine="pyarrow", # # # columns=['shortname'], # # filters=[("shortname", "=", "KapK-12-1-24-Ext-1-Lib-1-Index2"), ("tax_id", "=", "1")], # # ) # # df3 # pd.read_parquet( # path="./data/out/counts/KapK-12-1-24-Ext-1-Lib-1-Index2.parquet" # ).dtypes # filename_parquet = "./data/out/parquet_test.parquet" # df_counts, metadata = IO_Parquet().load(filename_parquet, shortname=None) # df3 = pd.read_parquet( # 
path="./data/out/parquet_test.parquet/", # engine="pyarrow", # # columns=['shortname'], # # filters=[("shortname", "=", "XXX"), ("tax_id", "=", "1")], # ) # pd.read_parquet( # path="./data/out/parquet_test.parquet/shortname=KapK-12-1-24-Ext-1-Lib-1-Index2/207fe0f890e94c61b2602bd673b37d55.parquet" # ).dtypes # IO_Parquet().save( # filename="./data/out/test", # df=df_counts_hdf5, # metadata=cfg.to_dict(), # # partition_cols="shortname", # ) # def f_test(x): # print(x) # return "-".join(x) + "1.parquet" # table = pa.Table.from_pandas(df_counts_hdf5) # table2 = IO_Parquet()._update_table_metadata(table, metadata=cfg.to_dict()) # pq.write_to_dataset( # table2, # root_path="./data/out/test", # partition_cols=["shortname"], # partition_filename_cb=f_test, # ) # pq.write_table(table2, "example.parquet", version="2.0") # # https://issues.apache.org/jira/browse/ARROW-6114 # pd.read_parquet(path="./data/out/test").dtypes # pd.read_parquet(path="example.parquet").dtypes # # if False: # # # pass # # # else: # # df1 = df # .iloc[:10] # # df2 = df1.copy(deep=True) # # df2["shortname"] = "XXX" # # categories = ["tax_id", "tax_name", "tax_rank", "strand", "shortname", "df_type"] # # df2 = utils.downcast_dataframe(df2, categories) # # df3 = pd.DataFrame.from_dict( # # { # # "a": range(2), # # "b": np.random.randn(2), # # "c": ["a", "b"], # # "shortname": ["KapK-198A-Ext-55-Lib-55-Index1", "XXX"], # # "df_type": ["fit_results", "fit_results"], # # } # # ) # # df4 = pd.DataFrame.from_dict( # # { # # "a": [2, 3], # # "b": np.random.randn(2), # # "c": ["c", "d"], # # "shortname": ["KapK-198A-Ext-55-Lib-55-Index1", "XXX"], # # "df_type": ["fit_results", "fit_results"], # # } # # ) # # # %time save_hdf5_test(df1, df2, df3, df4, cfg) # # # %timeit df_hdf5, metadata_hdf5, df2_hdf5, metadata2_hdf5 = load_hdf5_test() # # save_hdf5_test(df1, df2, df3, df4, cfg) # # df_hdf5, metadata_hdf5, df2_hdf5, metadata2_hdf5 = load_hdf5_test() # # # %time save_parquet_test(df1, df2, df3, df4, cfg) # # # %timeit df_parquet, metadata_parquet, df2_parquet, metadata2_parquet = load_parquet_test() # # save_parquet_test(df1, df2, df3, df4, cfg) # # df_parquet, metadata_parquet, df2_parquet, metadata2_parquet = load_parquet_test() # # # df1.to_parquet( # # # path="analytics", # # # engine="pyarrow", # # # compression="snappy", # # # partition_cols=["shortname"], # # # ) # # # df2.to_parquet( # # # path="analytics", # # # engine="pyarrow", # # # compression="snappy", # # # partition_cols=["shortname"], # # # ) # # # # df3 = pd.read_parquet( # # # # path="analytics", # # # # engine="pyarrow", # # # # # columns=['shortname'], # # # # filters=[("shortname", "=", "XXX"), ("tax_id", "=", "1")], # # # # ) # # # # pd.read_parquet( # # # # path="analytics", # # # # engine="pyarrow", # # # # # columns=['shortname'], # # # # # filters=[('shortname', '=', 'XXX'), ('tax_id', '=', '1')] # # # # ) # # # # index not important for counts # # # table1 = pa.Table.from_pandas(df, preserve_index=False) # # # table2 = pa.Table.from_pandas(df2, preserve_index=False) # # # # pq.write_table(table, "example.parquet", version="2.0") # # # # # Local dataset write # # # # pq.write_to_dataset(table, root_path="dataset_name", partition_cols=["shortname"]) # # # # table3 = pq.read_table("dataset_name") # # # # table3.to_pandas() # # # #%% # # # # table = table3 # # # # Path("parquet_dataset").mkdir(exist_ok=True) # # # # pq.write_table(table, "parquet_dataset/data1.parquet") # # # # pq.write_table(table2, "parquet_dataset/data2.parquet") # # # # dataset = 
ds.dataset("parquet_dataset", format="parquet") # # # # dataset.files # # # # print(dataset.schema.to_string(show_field_metadata=False)) # # # # dataset.to_table().to_pandas() # # # # dataset.to_table(columns=["tax_name", "shortname"]).to_pandas() # # # # dataset.to_table(filter=ds.field("tax_id") == 1).to_pandas() # # # # ds.field("a") != 3 # # # # ds.field("a").isin([1, 2, 3]) # # # for table in [table1, table2]: # # # pq.write_to_dataset( # # # table, # # # "parquet_dataset_partitioned", # # # partition_cols=["df_type", "shortname"], # # # ) # # # dataset = ds.dataset( # # # "parquet_dataset_partitioned", # # # format="parquet", # # # partitioning="hive", # important to retreave the shortname column # # # ) # # # dataset.files # # # dataset.to_table().to_pandas() # # # dataset.to_table(filter=ds.field("shortname") == "XXX").to_pandas() # # # table_different = pa.table( # # # { # # # "a": range(2), # # # "b": np.random.randn(2), # # # "c": ["a", "b"], # # # "shortname": ["KapK-198A-Ext-55-Lib-55-Index1", "XXX"], # # # "df_type": ["fit_results", "fit_results"], # # # } # # # ) # # # table_different.to_pandas() # # # pq.write_to_dataset( # # # table_different, # # # "parquet_dataset_partitioned", # # # partition_cols=["df_type", "shortname"], # # # ) # # # dataset = ds.dataset( # # # "parquet_dataset_partitioned", # # # format="parquet", # # # partitioning="hive", # important to retreave the shortname column # # # ) # # # dataset.files # # # dataset.to_table(filter=ds.field("df_type") == "counts").to_pandas() # # # dataset.to_table(filter=ds.field("df_type") == "fit_results").to_pandas()
from _struct import *
from _struct import _clearcache
# coding: utf-8 from __future__ import absolute_import from datetime import date, datetime # noqa: F401 from typing import List, Dict # noqa: F401 from swagger_server.models.base_model_ import Model from swagger_server import util class InventoryItem(Model): """NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ def __init__(self, id: str=None, host: str=None): # noqa: E501 """InventoryItem - a model defined in Swagger :param id: The id of this InventoryItem. # noqa: E501 :type id: str :param host: The host of this InventoryItem. # noqa: E501 :type host: str """ self.swagger_types = { 'id': str, 'host': str } self.attribute_map = { 'id': 'id', 'host': 'host' } self._id = id self._host = host @classmethod def from_dict(cls, dikt) -> 'InventoryItem': """Returns the dict as a model :param dikt: A dict. :type: dict :return: The InventoryItem of this InventoryItem. # noqa: E501 :rtype: InventoryItem """ return util.deserialize_model(dikt, cls) @property def id(self) -> str: """Gets the id of this InventoryItem. :return: The id of this InventoryItem. :rtype: str """ return self._id @id.setter def id(self, id: str): """Sets the id of this InventoryItem. :param id: The id of this InventoryItem. :type id: str """ if id is None: raise ValueError("Invalid value for `id`, must not be `None`") # noqa: E501 self._id = id @property def host(self) -> str: """Gets the host of this InventoryItem. :return: The host of this InventoryItem. :rtype: str """ return self._host @host.setter def host(self, host: str): """Sets the host of this InventoryItem. :param host: The host of this InventoryItem. :type host: str """ if host is None: raise ValueError("Invalid value for `host`, must not be `None`") # noqa: E501 self._host = host
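A short usage sketch for the generated model (the values are illustrative):

item = InventoryItem(id='d290f1ee-6c54-4b01-90e6-d701748f0851', host='inventory-01')
print(item.id, item.host)
item.host = 'inventory-02'  # the setter rejects None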
from workalendar.core import WesternCalendar, ChristianMixin from workalendar.core import MON, TUE, FRI from datetime import date, timedelta class Australia(WesternCalendar, ChristianMixin): "Australia" include_good_friday = True include_easter_monday = True include_queens_birthday = False include_labour_day_october = False include_boxing_day = True # Shall we shift Anzac Day? shift_anzac_day = True FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + ( (1, 26, "Australia Day"), ) def get_canberra_day(self, year): return ( Australia.get_nth_weekday_in_month(year, 3, MON, 2), "Canberra Day" ) def get_queens_birthday(self, year): return ( Australia.get_nth_weekday_in_month(year, 6, MON, 2), "Queen's Birthday" ) def get_labour_day_october(self, year): return ( Australia.get_nth_weekday_in_month(year, 10, MON), 'Labour Day' ) def get_anzac_day(self, year): anzac_day = date(year, 4, 25) if not self.shift_anzac_day: return (anzac_day, "Anzac Day") if anzac_day.weekday() in self.get_weekend_days(): anzac_day = self.find_following_working_day(anzac_day) return (anzac_day, "Anzac Day") def get_variable_days(self, year): # usual variable days days = super(Australia, self).get_variable_days(year) january_first = date(year, 1, 1) if january_first.weekday() in self.get_weekend_days(): days.append(( self.find_following_working_day(january_first), "New Year's Day shift") ) australia_day = date(year, 1, 26) if australia_day.weekday() in self.get_weekend_days(): days.append(( self.find_following_working_day(australia_day), "Australia Day shift") ) # was fixed, but might be shifted days.append(self.get_anzac_day(year)) if self.include_queens_birthday: days.append(self.get_queens_birthday(year)) if self.include_labour_day_october: days.append(self.get_labour_day_october(year)) christmas = date(year, 12, 25) boxing_day = date(year, 12, 26) if christmas.weekday() in self.get_weekend_days(): shift = self.find_following_working_day(christmas) days.append((shift, "Christmas Shift")) days.append((shift + timedelta(days=1), "Boxing Day Shift")) elif boxing_day.weekday() in self.get_weekend_days(): shift = self.find_following_working_day(boxing_day) days.append((shift, "Boxing Day Shift")) return days class AustraliaCapitalTerritory(Australia): "Australia Capital Territory" include_easter_saturday = True include_queens_birthday = True include_labour_day_october = True include_boxing_day = True def get_family_community_day(self, year): # Since this day is picked unsing the school year calendar, there's no # mathematical way yet to provide it surely # Family & Community Day was celebrated on the first Tuesday of # November in 2007, 2008 and 2009 if year in (2007, 2008, 2009): day = AustraliaCapitalTerritory.get_nth_weekday_in_month( year, 11, TUE) elif year == 2010: day = date(2010, 9, 27) elif year == 2011: day = date(2011, 10, 10) elif year == 2012: day = date(2012, 10, 8) elif year == 2013: day = date(2013, 9, 30) elif year == 2014: day = date(2014, 9, 29) elif year == 2015: day = date(2015, 9, 28) elif year == 2016: day = date(2016, 9, 26) else: raise Exception("Year %d is not implemented, Sorry" % year) return (day, "Family & Community Day") def get_variable_days(self, year): days = super(AustraliaCapitalTerritory, self) \ .get_variable_days(year) days += [ self.get_canberra_day(year), self.get_family_community_day(year), ] return days class AustraliaNewSouthWales(Australia): "Australia New South Wales" include_queens_birthday = True include_easter_saturday = True include_easter_sunday = True 
include_labour_day_october = True include_boxing_day = True shift_anzac_day = False class AustraliaNorthernTerritory(Australia): "Australia Northern Territory" include_easter_saturday = True include_queens_birthday = True include_boxing_day = True def get_may_day(self, year): return ( AustraliaNorthernTerritory.get_nth_weekday_in_month( year, 5, MON), "May Day" ) def get_picnic_day(self, year): return ( AustraliaNorthernTerritory.get_nth_weekday_in_month( year, 8, MON), "Picnic Day" ) def get_variable_days(self, year): days = super(AustraliaNorthernTerritory, self) \ .get_variable_days(year) days += [ self.get_may_day(year), self.get_picnic_day(year), ] return days class AustraliaQueensland(Australia): "Australia Queensland" include_easter_saturday = True include_queens_birthday = True include_boxing_day = True def get_labour_day_may(self, year): return ( AustraliaNorthernTerritory.get_nth_weekday_in_month( year, 5, MON), "Labour Day" ) def get_variable_days(self, year): days = super(AustraliaQueensland, self) \ .get_variable_days(year) days += [ self.get_labour_day_may(year), ] return days class SouthAustralia(Australia): "South Australia" include_easter_saturday = True include_queens_birthday = True include_labour_day_october = True def get_adelaides_cup(self, year): return ( SouthAustralia.get_nth_weekday_in_month( year, 3, MON, 2), "Adelaide's cup" ) def get_proclamation_day(self, year): return (date(year, 12, 26), "Proclamation Day") def get_variable_days(self, year): days = super(SouthAustralia, self) \ .get_variable_days(year) days += [ self.get_adelaides_cup(year), self.get_proclamation_day(year), ] return days class Tasmania(Australia): "Tasmania" include_queens_birthday = True include_boxing_day = True shift_anzac_day = False @property def has_recreation_day(self): return True def get_eight_hours_day(self, year): return ( Tasmania.get_nth_weekday_in_month(year, 3, MON, 2), "Eight hours Day" ) def get_recreation_day(self, year): return ( Tasmania.get_nth_weekday_in_month(year, 11, MON), "Recreation Day" ) def get_variable_days(self, year): days = super(Tasmania, self).get_variable_days(year) days.append(self.get_eight_hours_day(year)) if self.has_recreation_day: days.append(self.get_recreation_day(year)) return days class Hobart(Tasmania): "Hobart" @property def has_recreation_day(self): return False def get_hobart(self, year): return ( Hobart.get_nth_weekday_in_month(year, 2, MON, 2), "Royal Hobart Regatta" ) def get_variable_days(self, year): days = super(Hobart, self).get_variable_days(year) days.append(self.get_hobart(year)) return days class Victoria(Australia): "Victoria" include_easter_saturday = True include_queens_birthday = True include_boxing_day = True def get_labours_day_in_march(self, year): return ( Victoria.get_nth_weekday_in_month(year, 3, MON, 2), "Labour Day" ) def get_melbourne_cup(self, year): return ( Victoria.get_nth_weekday_in_month(year, 11, TUE), "Melbourne Cup" ) def get_variable_days(self, year): days = super(Victoria, self).get_variable_days(year) days.append(self.get_labours_day_in_march(year)) days.append(self.get_melbourne_cup(year)) return days class WesternAustralia(Australia): "Western Australia" include_boxing_day = True def get_labours_day_in_march(self, year): return ( WesternAustralia.get_nth_weekday_in_month(year, 3, MON), "Labour Day" ) def get_western_australia_day(self, year): return ( WesternAustralia.get_nth_weekday_in_month(year, 6, MON), "Western Australia Day" ) def get_variable_days(self, year): # It is not possible to surely 
compute Queen's Birthday holiday in # The western Australia territory, since it's based on the Governor # Decision (it is typically the last Monday of September or the first # Monday of October) days = super(WesternAustralia, self).get_variable_days(year) days.append(self.get_labours_day_in_march(year)) days.append(self.get_western_australia_day(year)) return days class MarshallIslands(WesternCalendar, ChristianMixin): "Marshall Islands" FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + ( (3, 3, "Remembrance Day"), (5, 1, "Constitution Day"), (11, 17, "Presidents' Day"), (12, 31, "New Year's Eve"), ) include_good_friday = True def get_variable_days(self, year): days = super(MarshallIslands, self).get_variable_days(year) days.append(( MarshallIslands.get_nth_weekday_in_month(year, 7, FRI), "Fishermen's Holiday" )) days.append(( MarshallIslands.get_nth_weekday_in_month(year, 9, FRI), "Labour Day" )) days.append(( MarshallIslands.get_last_weekday_in_month(year, 9, FRI), "Manit Day" )) days.append(( MarshallIslands.get_nth_weekday_in_month(year, 12, FRI), "Gospel Day" )) return days
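A quick usage sketch for the calendars above, relying on workalendar's standard holidays()/is_working_day() API:

from datetime import date

cal = Victoria()
for day, label in cal.holidays(2015):
    print(day, label)

# Melbourne Cup: first Tuesday of November.
assert not cal.is_working_day(date(2015, 11, 3))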
'''
Title : Print Function
Subdomain : Introduction
Domain : Python
Author : codeperfectplus
Created : 17 January 2020
'''
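The file above carries only its header docstring; the usual one-liner for this HackerRank exercise would be (a sketch, assuming n is read from stdin and the digits 1..n are printed without separators):

if __name__ == '__main__':
    n = int(input())
    print(*range(1, n + 1), sep='')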
import random import threading from threading import Thread, Event import unittest import uuid import logger import time import string from basetestcase import BaseTestCase from couchbase_helper.document import DesignDocument, View from membase.api.rest_client import RestConnection from membase.helper.spatial_helper import SpatialHelper from membase.helper.rebalance_helper import RebalanceHelper from remote.remote_util import RemoteMachineShellConnection class SpatialViewsTests(BaseTestCase): def setUp(self): super(SpatialViewsTests, self).setUp() self.thread_crashed = Event() self.thread_stopped = Event() self.skip_rebalance = self.input.param("skip_rebalance", False) self.use_dev_views = self.input.param("use-dev-views", False) self.default_map = "function (doc) {emit(doc.geometry, doc.age);}" self.map_updated = "function (doc) {emit(doc.geometry, doc.name);}" self.default_ddoc_name = self.input.param("default_ddoc_name", "test-ddoc") self.default_view_name = self.input.param("default_view_name", "test-view") self.ddoc_op = self.input.param("ddoc-ops", "create") #create\update\delete self.bucket_name = "default" if self.standard_buckets: self.bucket_name = "standard_bucket0" if self.sasl_buckets: self.bucket_name = "bucket0" self.helper = SpatialHelper(self, self.bucket_name) if not self.skip_rebalance: self.cluster.rebalance(self.servers[:], self.servers[1:], []) #load some items to verify self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc', return_docs=True) self.num_ddoc = self.input.param('num-ddoc', 1) self.views_per_ddoc = self.input.param('views-per-ddoc', 1) self.non_spatial_views_per_ddoc = self.input.param('non-spatial-views-per-ddoc', 0) if self.ddoc_op == 'update' or self.ddoc_op == 'delete': ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, self.non_spatial_views_per_ddoc) self.create_ddocs(ddocs) def suite_setUp(self): pass def tearDown(self): super(SpatialViewsTests, self).tearDown() def suite_tearDown(self): pass def test_add_spatial_views(self): ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, self.non_spatial_views_per_ddoc) self.perform_ddoc_ops(ddocs) def test_add_spatial_views_case_sensative(self): ddoc = DesignDocument(self.default_ddoc_name, [], spatial_views=[ View(self.default_view_name, self.default_map, dev_view=self.use_dev_views, is_spatial=True), View(self.default_view_name.upper(), self.default_map, dev_view=self.use_dev_views, is_spatial=True)]) self.create_ddocs([ddoc]) def test_add_single_spatial_view(self): name_lenght = self.input.param('name_lenght', None) view_name = self.input.param('view_name', self.default_view_name) if name_lenght: view_name = ''.join(random.choice(string.ascii_lowercase) for x in range(name_lenght)) not_compilable = self.input.param('not_compilable', False) error = self.input.param('error', None) map_fn = (self.default_map, 'function (doc) {emit(doc.geometry, doc.age);')[not_compilable] ddoc = DesignDocument(self.default_ddoc_name, [], spatial_views=[ View(view_name, map_fn, dev_view=self.use_dev_views, is_spatial=True)]) try: self.create_ddocs([ddoc]) except Exception as ex: if error and str(ex).find(error) != -1: self.log.info("Error caught as expected %s" % error) return else: self.fail("Unexpected error appeared during run %s" % ex) if error: self.fail("Expected error '%s' didn't appear" % error) def test_add_views_to_1_ddoc(self): same_names = self.input.param('same-name', False) error = self.input.param('error', None) num_views_per_ddoc = 10 create_threads = [] try: for i in 
range(num_views_per_ddoc): ddoc = DesignDocument(self.default_ddoc_name, [], spatial_views=[ View(self.default_view_name + (str(i), "")[same_names], self.default_map, dev_view=self.use_dev_views, is_spatial=True)]) create_thread = Thread(target=self.create_ddocs, name="create_thread" + str(i), args=([ddoc,],)) create_threads.append(create_thread) create_thread.start() for create_thread in create_threads: create_thread.join() except Exception as ex: if error and str(ex).find(error) != -1: self.log.info("Error caught as expected %s" % error) return else: self.fail("Unexpected error appeared during run %s" % ex) if error: self.fail("Expected error '%s' didn't appear" % error) def test_add_spatial_views_threads(self): same_names = self.input.param('same-name', False) num_views_per_ddoc = 10 create_threads = [] ddocs = [] for i in range(num_views_per_ddoc): ddoc = DesignDocument(self.default_ddoc_name + str(i), [], spatial_views=[ View(self.default_view_name + (str(i), "")[same_names], self.default_map, dev_view=self.use_dev_views, is_spatial=True)]) ddocs.append(ddoc) if self.ddoc_op == 'update' or self.ddoc_op == 'delete': self.create_ddocs(ddocs) i = 0 for ddoc in ddocs: create_thread = Thread(target=self.perform_ddoc_ops, name="ops_thread" + str(i), args=([ddoc,],)) i +=1 create_threads.append(create_thread) create_thread.start() for create_thread in create_threads: create_thread.join() if self.thread_crashed.is_set(): self.fail("Error occured during run") def test_create_with_other_ddoc_ops(self): operation = self.input.param('operation', 'create') ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0) other_ddocs = self.make_ddocs(self.num_ddoc, 0, self.views_per_ddoc) if operation == 'delete' or operation == 'update': self.create_ddocs(other_ddocs) other_ddoc_threads = [] for ddoc in other_ddocs: if operation == 'create' or operation == 'update': other_ddoc_thread = Thread(target=self.create_ddocs, name="other_doc_thread", args=(other_ddocs,)) else: other_ddoc_thread = Thread(target=self.delete_views, name="other_doc_thread", args=(other_ddocs,)) other_ddoc_threads.append(other_ddoc_thread) other_ddoc_thread.start() self.perform_ddoc_ops(ddocs) for thread in other_ddoc_threads: thread.join() def test_create_views_during_rebalance(self): start_cluster = self.input.param('start-cluster', 1) servers_in = self.input.param('servers_in', 0) servers_out = self.input.param('servers_out', 0) ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, self.non_spatial_views_per_ddoc) if start_cluster > 1: rebalance = self.cluster.async_rebalance(self.servers[:1], self.servers[1:start_cluster], []) rebalance.result() servs_in = [] servs_out = [] if servers_in: servs_in = self.servers[start_cluster:servers_in + 1] if servers_out: if start_cluster > 1: servs_out = self.servers[1:start_cluster] servs_out = servs_out[-servers_out:] else: servs_out = self.servers[-servers_out:] rebalance_thread = Thread(target=self.cluster.rebalance, name="reb_thread", args=(self.servers[:1], servs_in, servs_out)) rebalance_thread.start() self.perform_ddoc_ops(ddocs) rebalance_thread.join() def test_views_node_pending_state(self): operation = self.input.param('operation', 'add_node') ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0) rest = RestConnection(self.master) if operation == 'add_node': self.log.info("adding the node %s:%s" % ( self.servers[1].ip, self.servers[1].port)) otpNode = rest.add_node(self.master.rest_username, self.master.rest_password, self.servers[1].ip, self.servers[1].port) 
elif operation == 'failover': nodes = rest.node_statuses() nodes = [node for node in nodes if node.ip != self.master.ip or node.port != self.master.port] rest.fail_over(nodes[0].id) else: self.fail("There is no operation %s" % operation) self.perform_ddoc_ops(ddocs) def test_views_failover(self): num_nodes = self.input.param('num-nodes', 1) ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0) RebalanceHelper.wait_for_persistence(self.master, self.bucket_name) self.cluster.failover(self.servers, self.servers[1:num_nodes]) self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes]) self.perform_ddoc_ops(ddocs) def test_views_with_warm_up(self): warmup_node = self.servers[-1] shell = RemoteMachineShellConnection(warmup_node) shell.stop_couchbase() time.sleep(20) shell.start_couchbase() shell.disconnect() ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0) self.perform_ddoc_ops(ddocs) def test_views_during_index(self): ddocs = self.make_ddocs(1, 1, 1) self.create_ddocs(ddocs) #run query stale=false to start index rest = RestConnection(self.master) for ddoc in ddocs: for view in ddoc.spatial_views: self.helper.query_view(rest, ddoc, view, bucket=self.bucket_name, extra_params={}) ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 1) self.perform_ddoc_ops(ddocs) def test_views_during_ddoc_compaction(self): fragmentation_value = self.input.param("fragmentation_value", 80) ddoc_to_compact = DesignDocument("ddoc_to_compact", [], spatial_views=[ View(self.default_view_name, 'function (doc) { emit(doc.age, doc.name);}', dev_view=self.use_dev_views)]) ddocs = self.make_ddocs(self.num_ddoc, self.views_per_ddoc, 0) self.disable_compaction() self.create_ddocs([ddoc_to_compact,]) fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(self.master, ddoc_to_compact.name, fragmentation_value, self.default_bucket_name) end_time = time.time() + self.wait_timeout * 30 while fragmentation_monitor.state != "FINISHED" and end_time > time.time(): self.helper.insert_docs(self.num_items, 'spatial-doc') if end_time < time.time() and fragmentation_monitor.state != "FINISHED": self.fail("impossible to reach compaction value after %s sec" % (self.wait_timeout * 20)) fragmentation_monitor.result() compaction_task = self.cluster.async_compact_view(self.master, ddoc_to_compact.name, self.default_bucket_name) self.perform_ddoc_ops(ddocs) result = compaction_task.result(self.wait_timeout * 10) self.assertTrue(result, "Compaction didn't finished correctly. 
Please check diags") def make_ddocs(self, ddocs_num, views_per_ddoc, non_spatial_views_per_ddoc): ddocs = [] for i in range(ddocs_num): views = [] for k in range(views_per_ddoc): views.append(View(self.default_view_name + str(k), self.default_map, dev_view=self.use_dev_views, is_spatial=True)) non_spatial_views = [] if non_spatial_views_per_ddoc: for k in range(non_spatial_views_per_ddoc): non_spatial_views.append(View(self.default_view_name + str(k), 'function (doc) { emit(null, doc);}', dev_view=self.use_dev_views)) ddocs.append(DesignDocument(self.default_ddoc_name + str(i), non_spatial_views, spatial_views=views)) return ddocs def create_ddocs(self, ddocs, bucket=None): bucket_views = bucket or self.buckets[0] for ddoc in ddocs: if not (ddoc.views or ddoc.spatial_views): self.cluster.create_view(self.master, ddoc.name, [], bucket=bucket_views) for view in ddoc.views: self.cluster.create_view(self.master, ddoc.name, view, bucket=bucket_views) for view in ddoc.spatial_views: self.cluster.create_view(self.master, ddoc.name, view, bucket=bucket_views) def delete_views(self, ddocs, views=[], spatial_views=[], bucket=None): bucket_views = bucket or self.buckets[0] for ddoc in ddocs: vs = views or ddoc.views sp_vs = spatial_views or ddoc.spatial_views for view in vs: self.cluster.delete_view(self.master, ddoc.name, view, bucket=bucket_views) for view in sp_vs: self.cluster.delete_view(self.master, ddoc.name, view, bucket=bucket_views) def perform_ddoc_ops(self, ddocs): try: if self.ddoc_op == 'update': for ddoc in ddocs: for view in ddoc.spatial_views: view.map_func = self.map_updated if self.ddoc_op == 'delete': self.delete_views(ddocs) else: self.create_ddocs(ddocs) except Exception as ex: self.thread_crashed.set() self.log.error("****ERROR***** \n At least one of threads is crashed: %s" % (ex)) raise ex finally: if not self.thread_stopped.is_set(): self.thread_stopped.set() class SpatialViewQueriesTests(BaseTestCase): def setUp(self): self.helper = SpatialHelper(self, self.bucket_name) super(SpatialViewQueriesTests, self).setUp() self.thread_crashed = Event() self.thread_stopped = Event() self.skip_rebalance = self.input.param("skip_rebalance", False) self.use_dev_views = self.input.param("use-dev-views", False) self.all_view_one_ddoc = self.input.param("all-view-one-ddoc", False) self.default_ddoc_name = "test-ddoc-query" self.default_view_name = "test-view-query" self.params = self.get_query_params() self.bucket_name = "default" if self.standard_buckets: self.bucket_name = "standard_bucket0" if self.sasl_buckets: self.bucket_name = "bucket0" if not self.skip_rebalance: self.cluster.rebalance(self.servers[:], self.servers[1:], []) #load some items to verify self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc', return_docs=True) self.ddocs = self.helper.create_default_views( is_one_ddoc=self.all_view_one_ddoc) def suite_setUp(self): pass def tearDown(self): super(SpatialViewQueriesTests, self).tearDown() def suite_tearDown(self): pass def test_spatial_view_queries(self): error = self.input.param('error', None) try: self.query_and_verify_result(self.docs, self.params) except Exception as ex: if error and str(ex).find(error) != -1: self.log.info("Error caught as expected %s" % error) return else: self.fail("Unexpected error appeared during run %s" % ex) if error: self.fail("Expected error '%s' didn't appear" % error) def test_add_spatial_view_queries_threads(self): diff_nodes = self.input.param("diff-nodes", False) query_threads = [] for i in range(len(self.servers)): node = 
(self.master, self.servers[i])[diff_nodes] self.query_and_verify_result(self.docs, self.params, node=node) q_thread = Thread(target=self.query_and_verify_result, name="query_thread" + str(i), args=([self.docs, self.params, node])) query_threads.append(q_thread) q_thread.start() for q_thread in query_threads: q_thread.join() if self.thread_crashed.is_set(): self.fail("Error occured during run") def test_view_queries_during_rebalance(self): start_cluster = self.input.param('start-cluster', 1) servers_in = self.input.param('servers_in', 0) servers_out = self.input.param('servers_out', 0) if start_cluster > 1: rebalance = self.cluster.async_rebalance(self.servers[:1], self.servers[1:start_cluster], []) rebalance.result() servs_in = [] servs_out = [] if servers_in: servs_in = self.servers[start_cluster:servers_in + 1] if servers_out: if start_cluster > 1: servs_out = self.servers[1:start_cluster] servs_out = servs_out[-servers_out:] else: servs_out = self.servers[-servers_out:] rebalance = self.cluster.async_rebalance(self.servers, servs_in, servs_out) self.query_and_verify_result(self.docs, self.params) rebalance.result() def test_view_queries_node_pending_state(self): operation = self.input.param('operation', 'add_node') rest = RestConnection(self.master) if operation == 'add_node': self.log.info("adding the node %s:%s" % ( self.servers[1].ip, self.servers[1].port)) otpNode = rest.add_node(self.master.rest_username, self.master.rest_password, self.servers[1].ip, self.servers[1].port) elif operation == 'failover': nodes = rest.node_statuses() nodes = [node for node in nodes if node.ip != self.master.ip or node.port != self.master.port] rest.fail_over(nodes[0].id) else: self.fail("There is no operation %s" % operation) self.query_and_verify_result(self.docs, self.params) def test_view_queries_failover(self): num_nodes = self.input.param('num-nodes', 1) self.cluster.failover(self.servers, self.servers[1:num_nodes]) self.cluster.rebalance(self.servers, [], self.servers[1:num_nodes]) self.query_and_verify_result(self.docs, self.params) def test_views_with_warm_up(self): warmup_node = self.servers[-1] shell = RemoteMachineShellConnection(warmup_node) shell.stop_couchbase() time.sleep(20) shell.start_couchbase() shell.disconnect() self.query_and_verify_result(self.docs, self.params) def test_view_queries_during_ddoc_compaction(self): fragmentation_value = self.input.param("fragmentation_value", 80) self.disable_compaction() fragmentation_monitor = self.cluster.async_monitor_view_fragmentation(self.master, self.ddocs[0].name, fragmentation_value, self.default_bucket_name) end_time = time.time() + self.wait_timeout * 30 while fragmentation_monitor.state != "FINISHED" and end_time > time.time(): self.docs = self.helper.insert_docs(self.num_items, 'spatial-doc', return_docs=True) if end_time < time.time() and fragmentation_monitor.state != "FINISHED": self.fail("impossible to reach compaction value after %s sec" % (self.wait_timeout * 20)) fragmentation_monitor.result() compaction_task = self.cluster.async_compact_view(self.master, self.ddocs[0].name, self.default_bucket_name) self.query_and_verify_result(self.docs, self.params) result = compaction_task.result(self.wait_timeout * 10) self.assertTrue(result, "Compaction didn't finished correctly. 
Please check diags") def get_query_params(self): current_params = {} for key in self.input.test_params: if key == 'skip' or key == 'limit': current_params[key] = int(self.input.test_params[key]) elif key == 'bbox': current_params[key] = [int(x) for x in self.input.test_params[key][1:-1].split(",")] elif key == 'stale': current_params[key] = self.input.test_params[key] return current_params def query_and_verify_result(self, doc_inserted, params, node=None): try: rest = RestConnection(self.master) if node: rest = RestConnection(node) expected_ddocs = self.helper.generate_matching_docs(doc_inserted, params) for ddoc in self.ddocs: for view in ddoc.spatial_views: result_ddocs = self.helper.query_view(rest, ddoc, view, bucket=self.bucket_name, extra_params=params, num_expected=len(expected_ddocs), num_tries=20) self.helper.verify_matching_keys(expected_ddocs, result_ddocs) except Exception as ex: self.thread_crashed.set() self.log.error("****ERROR***** \n At least one of threads is crashed: %s" % (ex)) raise ex finally: if not self.thread_stopped.is_set(): self.thread_stopped.set() class SpatialViewTests(BaseTestCase): def setUp(self): self.helper = SpatialHelper(self, "default") super(SpatialViewTests, self).setUp() self.log = logger.Logger.get_logger() self.helper.setup_cluster() def suite_setUp(self): pass def tearDown(self): super(SpatialViewTests, self).tearDown() def suite_tearDown(self): pass def test_create_x_design_docs(self): num_design_docs = self.helper.input.param("num-design-docs") self.log.info("description : create {0} spatial views without " "running any spatial view query".format(num_design_docs)) fun = "function (doc) {emit(doc.geometry, doc);}" self._insert_x_design_docs(num_design_docs, fun) def test_update_x_design_docs(self): num_design_docs = self.helper.input.param("num-design-docs") self.log.info("description : update {0} spatial views without " "running any spatial view query".format(num_design_docs)) fun = "function (doc) {emit(doc.geometry, doc);}" self._insert_x_design_docs(num_design_docs, fun) # Update the design docs with a different function fun = "function (doc) {emit(doc.geometry, null);}" self._insert_x_design_docs(num_design_docs, fun) def _insert_x_design_docs(self, num_design_docs, fun): rest = self.helper.rest bucket = self.helper.bucket name = "dev_test_multiple_design_docs" for i in range(0, num_design_docs): design_name = "{0}-{1}".format(name, i) self.helper.create_index_fun(design_name, fun) # Verify that the function was really stored response, meta = rest.get_spatial(bucket, design_name) self.assertTrue(response) self.assertEqual(meta["id"], "_design/{0}".format(design_name)) self.assertEqual(response["spatial"][design_name], fun) def test_insert_x_docs(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : create a spatial view on {0} documents"\ .format(num_docs)) design_name = "dev_test_insert_{0}_docs".format(num_docs) self._insert_x_docs_and_query(num_docs, design_name) # Does verify the full docs and not only the keys def test_insert_x_docs_full_verification(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : create a spatial view with {0} docs" " and verify the full documents".format(num_docs)) design_name = "dev_test_insert_{0}_docs_full_verification"\ .format(num_docs) self.helper.create_index_fun(design_name) inserted_docs = self.helper.insert_docs(num_docs, return_docs=True) self.helper.query_index_for_verification(design_name, inserted_docs, full_docs=True) def 
test_insert_x_delete_y_docs(self): num_docs = self.helper.input.param("num-docs") num_deleted_docs = self.helper.input.param("num-deleted-docs") self.log.info("description : create spatial view with {0} docs " " and delete {1} docs".format(num_docs, num_deleted_docs)) design_name = "dev_test_insert_{0}_delete_{1}_docs"\ .format(num_docs, num_deleted_docs) inserted_keys = self._setup_index(design_name, num_docs) # Delete documents and verify that the documents got deleted deleted_keys = self.helper.delete_docs(num_deleted_docs) num_expected = num_docs - len(deleted_keys) results = self.helper.get_results(design_name, 2 * num_docs, num_expected=num_expected) result_keys = self.helper.get_keys(results) self.assertEqual(len(result_keys), num_expected) self.helper.verify_result(inserted_keys, deleted_keys + result_keys) def test_insert_x_update_y_docs(self): num_docs = self.helper.input.param("num-docs") num_updated_docs = self.helper.input.param("num-updated-docs") self.log.info("description : create spatial view with {0} docs " " and update {1} docs".format(num_docs, num_updated_docs)) design_name = "dev_test_insert_{0}_delete_{1}_docs"\ .format(num_docs, num_updated_docs) self._setup_index(design_name, num_docs) # Update documents and verify that the documents got updated updated_keys = self.helper.insert_docs(num_updated_docs, extra_values=dict(updated=True)) results = self.helper.get_results(design_name, 2 * num_docs) result_updated_keys = self._get_updated_docs_keys(results) self.assertEqual(len(updated_keys), len(result_updated_keys)) self.helper.verify_result(updated_keys, result_updated_keys) def test_get_spatial_during_x_min_load_y_working_set(self): num_docs = self.helper.input.param("num-docs") duration = self.helper.input.param("load-time") self.log.info("description : this test will continuously insert data " "and get the spatial view results for {0} minutes") design_name = "dev_test_insert_and_get_spatial_{0}_mins"\ .format(duration) self._query_x_mins_during_loading(num_docs, duration, design_name) def _query_x_mins_during_loading(self, num_docs, duration, design_name): self.helper.create_index_fun(design_name) load_thread = InsertDataTillStopped(self.helper, num_docs) load_thread.start() self._get_results_for_x_minutes(design_name, duration) load_thread.stop_insertion() load_thread.join() self.helper.query_index_for_verification(design_name, load_thread.inserted()) def test_get_spatial_during_x_min_load_y_working_set_multiple_design_docs( self): num_docs = self.helper.input.param("num-docs") num_design_docs = self.helper.input.param("num-design-docs") duration = self.helper.input.param("load-time") self.log.info("description : will create {0} docs per design doc and " "{1} design docs that will be queried while the data " "is loaded for {2} minutes" .format(num_docs, num_design_docs, duration)) name = "dev_test_spatial_test_{0}_docs_{1}_design_docs_{2}_mins_load"\ .format(num_docs, num_design_docs, duration) view_test_threads = [] for i in range(0, num_design_docs): design_name = "{0}-{1}".format(name, i) thread_result = [] t = Thread( target=SpatialViewTests._test_multiple_design_docs_thread_wrapper, name="Insert documents and query multiple design docs in parallel", args=(self, num_docs, duration, design_name, thread_result)) t.start() view_test_threads.append((t, thread_result)) for (t, failures) in view_test_threads: t.join() for (t, failures) in view_test_threads: if len(failures) > 0: self.fail("view thread failed : {0}".format(failures[0])) def 
_test_multiple_design_docs_thread_wrapper(self, num_docs, duration, design_name, failures): try: self._query_x_mins_during_loading(num_docs, duration, design_name) except Exception as ex: failures.append(ex) def test_spatial_view_on_x_docs_y_design_docs(self): num_docs = self.helper.input.param("num-docs") num_design_docs = self.helper.input.param("num-design-docs") self.log.info("description : will create {0} docs per design doc and " "{1} design docs that will be queried") name = "dev_test_spatial_test_{0}_docs_y_design_docs"\ .format(num_docs, num_design_docs) design_names = ["{0}-{1}".format(name, i) \ for i in range(0, num_design_docs)] view_test_threads = [] for design_name in design_names: thread_result = [] t = Thread( target=SpatialViewTests._test_spatial_view_thread_wrapper, name="Insert documents and query in parallel", args=(self, num_docs, design_name, thread_result)) t.start() view_test_threads.append((t, thread_result)) for (t, failures) in view_test_threads: t.join() for (t, failures) in view_test_threads: if len(failures) > 0: self.fail("view thread failed : {0}".format(failures[0])) def _test_spatial_view_thread_wrapper(self, num_docs, design_name, failures): try: self._insert_x_docs_and_query(num_docs, design_name) except Exception as ex: failures.append(ex) # Create the index and insert documents including verififaction that # the index contains them # Returns the keys of the inserted documents def _setup_index(self, design_name, num_docs): self.helper.create_index_fun(design_name) inserted_keys = self.helper.insert_docs(num_docs) self.helper.query_index_for_verification(design_name, inserted_keys) return inserted_keys # Return the keys for all docs that contain a key called "updated" # in the value def _get_updated_docs_keys(self, results): keys = [] if results: rows = results["rows"] for row in rows: if "updated" in row["value"]: keys.append(row["id"]) self.log.info("{0} documents to updated".format(len(keys))) return keys def _get_results_for_x_minutes(self, design_name, duration, delay=5): random.seed(0) start = time.time() while (time.time() - start) < duration * 60: limit = random.randint(1, 1000) self.log.info("{0} seconds has passed ....".format( (time.time() - start))) results = self.helper.get_results(design_name, limit) keys = self.helper.get_keys(results) self.log.info("spatial view returned {0} rows".format(len(keys))) time.sleep(delay) def _insert_x_docs_and_query(self, num_docs, design_name): inserted_keys = self._setup_index(design_name, num_docs) self.assertEqual(len(inserted_keys), num_docs) def test_update_view_x_docs(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : create a spatial view on {0} documents " "and update the view so that it returns only a subset"\ .format(num_docs)) design_name = "dev_test_update_view_{0}_docs".format(num_docs) # Create an index that emits all documents self.helper.create_index_fun(design_name) keys_b = self.helper.insert_docs(num_docs // 3, "bbb") keys_c = self.helper.insert_docs(num_docs - (num_docs // 3), "ccc") self.helper.query_index_for_verification(design_name, keys_b + keys_c) # Update index to only a subset of the documents spatial_fun = ('function (doc, meta) {' 'if(meta.id.indexOf("ccc") != -1) {' 'emit(doc.geometry, doc);}}') self.helper.create_index_fun(design_name, spatial_fun) self.helper.query_index_for_verification(design_name, keys_c) def test_compare_views_all_nodes_x_docs(self): num_docs = self.helper.input.param("num-docs") self.log.info("description : creates view 
on {0} documents, queries " "all nodes (not only the master node) and compares " "if the results are all the same"\ .format(num_docs)) design_name = "dev_test_compare_views_{0}_docs".format(num_docs) inserted_keys = self._setup_index(design_name, num_docs) nodes = self.helper.rest.get_nodes() params = {"connection_timeout": 60000, "full_set": True} # Query every single node and verify for n in nodes: n_rest = RestConnection({ "ip": n.ip, "port": n.port, "username": self.helper.master.rest_username, "password": self.helper.master.rest_password}) results = n_rest.spatial_results(self.helper.bucket, design_name, params, None) result_keys = self.helper.get_keys(results) self.helper.verify_result(inserted_keys, result_keys) class InsertDataTillStopped(threading.Thread): def __init__(self, helper, num_docs): threading.Thread.__init__(self) self._helper = helper self._num_docs = num_docs self._stop_insertion = False self._last_inserted = [] def run(self): i = 0 while not self._stop_insertion: i += 1 self._last_inserted = self._helper.insert_docs( self._num_docs) def stop_insertion(self): self._stop_insertion = True # Return the last inserted set of docs def inserted(self): return self._last_inserted
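For orientation, a minimal sketch (not from the test suite itself) of the DesignDocument/View objects these tests build; the default names and map function mirror SpatialViewsTests above:

from couchbase_helper.document import DesignDocument, View

def make_minimal_spatial_ddoc(ddoc_name="test-ddoc", view_name="test-view"):
    # Same default spatial map function as SpatialViewsTests.default_map
    spatial_map = "function (doc) {emit(doc.geometry, doc.age);}"
    view = View(view_name, spatial_map, dev_view=False, is_spatial=True)
    # No regular (non-spatial) views, one spatial view
    return DesignDocument(ddoc_name, [], spatial_views=[view])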
""" Given a string, determine if it is a palindrome, considering only alphanumeric characters and ignoring cases. For example, "A man, a plan, a canal: Panama" is a palindrome. "race a car" is not a palindrome. Note: Have you consider that the string might be empty? This is a good question to ask during an interview. For the purpose of this problem, we define empty string as valid palindrome. """ from string import ascii_letters def is_palindrome(s): """ :type s: str :rtype: bool """ i = 0 j = len(s)-1 while i < j: while i < j and not s[i].isalnum(): i += 1 while i < j and not s[j].isalnum(): j -= 1 if s[i].lower() != s[j].lower(): return False i, j = i+1, j-1 return True """ Here is a bunch of other variations of is_palindrome function. Variation 1: Find the reverse of the string and compare it with the original string Variation 2: Loop from the start to length/2 and check the first character and last character and so on... for instance s[0] compared with s[n-1], s[1] == s[n-2]... Variation 3: Using stack idea. Note: We are assuming that we are just checking a one word string. To check if a complete sentence """ def remove_punctuation(s): """ Remove punctuation, case sensitivity and spaces """ return "".join(i.lower() for i in s if i in ascii_letters) # Variation 1 def string_reverse(s): return s[::-1] def is_palindrome_reverse(s): s = remove_punctuation(s) # can also get rid of the string_reverse function and just do this return s == s[::-1] in one line. if (s == string_reverse(s)): return True return False # Variation 2 def is_palindrome_two_pointer(s): s = remove_punctuation(s) for i in range(0, len(s)//2): if (s[i] != s[len(s) - i - 1]): return False return True # Variation 3 def is_palindrome_stack(s): stack = [] s = remove_punctuation(s) for i in range(len(s)//2, len(s)): stack.append(s[i]) for i in range(0, len(s)//2): if s[i] != stack.pop(): return False return True
# -*- coding: utf-8 -*- """ Training a Classifier ===================== This is it. You have seen how to define neural networks, compute loss and make updates to the weights of the network. Now you might be thinking, What about data? ---------------- Generally, when you have to deal with image, text, audio or video data, you can use standard python packages that load data into a numpy array. Then you can convert this array into a ``torch.*Tensor``. - For images, packages such as Pillow, OpenCV are useful - For audio, packages such as scipy and librosa - For text, either raw Python or Cython based loading, or NLTK and SpaCy are useful Specifically for vision, we have created a package called ``torchvision``, that has data loaders for common datasets such as Imagenet, CIFAR10, MNIST, etc. and data transformers for images, viz., ``torchvision.datasets`` and ``torch.utils.data.DataLoader``. This provides a huge convenience and avoids writing boilerplate code. For this tutorial, we will use the CIFAR10 dataset. It has the classes: ‘airplane’, ‘automobile’, ‘bird’, ‘cat’, ‘deer’, ‘dog’, ‘frog’, ‘horse’, ‘ship’, ‘truck’. The images in CIFAR-10 are of size 3x32x32, i.e. 3-channel color images of 32x32 pixels in size. .. figure:: /_static/img/cifar10.png :alt: cifar10 cifar10 Training an image classifier ---------------------------- We will do the following steps in order: 1. Load and normalizing the CIFAR10 training and test datasets using ``torchvision`` 2. Define a Convolutional Neural Network 3. Define a loss function 4. Train the network on the training data 5. Test the network on the test data 1. Loading and normalizing CIFAR10 ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ Using ``torchvision``, it’s extremely easy to load CIFAR10. """ import torch import torchvision import torchvision.transforms as transforms ######################################################################## # The output of torchvision datasets are PILImage images of range [0, 1]. # We transform them to Tensors of normalized range [-1, 1]. transform = transforms.Compose( [transforms.ToTensor(), transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))]) trainset = torchvision.datasets.CIFAR10(root='./data', train=True, download=True, transform=transform) trainloader = torch.utils.data.DataLoader(trainset, batch_size=4, shuffle=True, num_workers=2) testset = torchvision.datasets.CIFAR10(root='./data', train=False, download=True, transform=transform) testloader = torch.utils.data.DataLoader(testset, batch_size=4, shuffle=False, num_workers=2) classes = ('plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck') ######################################################################## # Let us show some of the training images, for fun. import matplotlib.pyplot as plt import numpy as np # functions to show an image def imshow(img): img = img / 2 + 0.5 # unnormalize npimg = img.numpy() plt.imshow(np.transpose(npimg, (1, 2, 0))) plt.show() # get some random training images dataiter = iter(trainloader) images, labels = dataiter.next() # show images imshow(torchvision.utils.make_grid(images)) # print labels print(' '.join('%5s' % classes[labels[j]] for j in range(4))) ######################################################################## # 2. Define a Convolutional Neural Network # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Copy the neural network from the Neural Networks section before and modify it to # take 3-channel images (instead of 1-channel images as it was defined). 
import torch.nn as nn import torch.nn.functional as F class Net(nn.Module): def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(3, 6, 5) self.pool = nn.MaxPool2d(2, 2) self.conv2 = nn.Conv2d(6, 16, 5) self.fc1 = nn.Linear(16 * 5 * 5, 120) self.fc2 = nn.Linear(120, 84) self.fc3 = nn.Linear(84, 10) def forward(self, x): x = self.pool(F.relu(self.conv1(x))) x = self.pool(F.relu(self.conv2(x))) x = x.view(-1, 16 * 5 * 5) x = F.relu(self.fc1(x)) x = F.relu(self.fc2(x)) x = self.fc3(x) return x net = Net() ######################################################################## # 3. Define a Loss function and optimizer # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # Let's use a Classification Cross-Entropy loss and SGD with momentum. import torch.optim as optim criterion = nn.CrossEntropyLoss() optimizer = optim.SGD(net.parameters(), lr=0.001, momentum=0.9) ######################################################################## # 4. Train the network # ^^^^^^^^^^^^^^^^^^^^ # # This is when things start to get interesting. # We simply have to loop over our data iterator, and feed the inputs to the # network and optimize. for epoch in range(2): # loop over the dataset multiple times running_loss = 0.0 for i, data in enumerate(trainloader, 0): # get the inputs inputs, labels = data # zero the parameter gradients optimizer.zero_grad() # forward + backward + optimize outputs = net(inputs) loss = criterion(outputs, labels) loss.backward() optimizer.step() # print statistics running_loss += loss.item() if i % 2000 == 1999: # print every 2000 mini-batches print('[%d, %5d] loss: %.3f' % (epoch + 1, i + 1, running_loss / 2000)) running_loss = 0.0 print('Finished Training') ######################################################################## # 5. Test the network on the test data # ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ # # We have trained the network for 2 passes over the training dataset. # But we need to check if the network has learnt anything at all. # # We will check this by predicting the class label that the neural network # outputs, and checking it against the ground-truth. If the prediction is # correct, we add the sample to the list of correct predictions. # # Okay, first step. Let us display an image from the test set to get familiar. dataiter = iter(testloader) images, labels = dataiter.next() # print images imshow(torchvision.utils.make_grid(images)) print('GroundTruth: ', ' '.join('%5s' % classes[labels[j]] for j in range(4))) ######################################################################## # Okay, now let us see what the neural network thinks these examples above are: outputs = net(images) ######################################################################## # The outputs are energies for the 10 classes. # Higher the energy for a class, the more the network # thinks that the image is of the particular class. # So, let's get the index of the highest energy: _, predicted = torch.max(outputs, 1) print('Predicted: ', ' '.join('%5s' % classes[predicted[j]] for j in range(4))) ######################################################################## # The results seem pretty good. # # Let us look at how the network performs on the whole dataset. 
correct = 0 total = 0 with torch.no_grad(): for data in testloader: images, labels = data outputs = net(images) _, predicted = torch.max(outputs.data, 1) total += labels.size(0) correct += (predicted == labels).sum().item() print('Accuracy of the network on the 10000 test images: %d %%' % ( 100 * correct / total)) ######################################################################## # That looks waaay better than chance, which is 10% accuracy (randomly picking # a class out of 10 classes). # Seems like the network learnt something. # # Hmmm, what are the classes that performed well, and the classes that did # not perform well: class_correct = list(0. for i in range(10)) class_total = list(0. for i in range(10)) with torch.no_grad(): for data in testloader: images, labels = data outputs = net(images) _, predicted = torch.max(outputs, 1) c = (predicted == labels).squeeze() for i in range(4): label = labels[i] class_correct[label] += c[i].item() class_total[label] += 1 for i in range(10): print('Accuracy of %5s : %2d %%' % ( classes[i], 100 * class_correct[i] / class_total[i])) ######################################################################## # Okay, so what next? # # How do we run these neural networks on the GPU? # # Training on GPU # ---------------- # Just like how you transfer a Tensor on to the GPU, you transfer the neural # net onto the GPU. # # Let's first define our device as the first visible cuda device if we have # CUDA available: device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") # Assume that we are on a CUDA machine, then this should print a CUDA device: print(device) ######################################################################## # The rest of this section assumes that `device` is a CUDA device. # # Then these methods will recursively go over all modules and convert their # parameters and buffers to CUDA tensors: # # .. code:: python # # net.to(device) # # # Remember that you will have to send the inputs and targets at every step # to the GPU too: # # .. code:: python # # inputs, labels = inputs.to(device), labels.to(device) # # Why dont I notice MASSIVE speedup compared to CPU? Because your network # is realllly small. # # **Exercise:** Try increasing the width of your network (argument 2 of # the first ``nn.Conv2d``, and argument 1 of the second ``nn.Conv2d`` – # they need to be the same number), see what kind of speedup you get. # # **Goals achieved**: # # - Understanding PyTorch's Tensor library and neural networks at a high level. # - Train a small neural network to classify images # # Training on multiple GPUs # ------------------------- # If you want to see even more MASSIVE speedup using all of your GPUs, # please check out :doc:`data_parallel_tutorial`. # # Where do I go next? # ------------------- # # - :doc:`Train neural nets to play video games </intermediate/reinforcement_q_learning>` # - `Train a state-of-the-art ResNet network on imagenet`_ # - `Train a face generator using Generative Adversarial Networks`_ # - `Train a word-level language model using Recurrent LSTM networks`_ # - `More examples`_ # - `More tutorials`_ # - `Discuss PyTorch on the Forums`_ # - `Chat with other users on Slack`_ # # .. _Train a state-of-the-art ResNet network on imagenet: https://github.com/pytorch/examples/tree/master/imagenet # .. _Train a face generator using Generative Adversarial Networks: https://github.com/pytorch/examples/tree/master/dcgan # .. 
_Train a word-level language model using Recurrent LSTM networks: https://github.com/pytorch/examples/tree/master/word_language_model # .. _More examples: https://github.com/pytorch/examples # .. _More tutorials: https://github.com/pytorch/tutorials # .. _Discuss PyTorch on the Forums: https://discuss.pytorch.org/ # .. _Chat with other users on Slack: https://pytorch.slack.com/messages/beginner/ # %%%%%%INVISIBLE_CODE_BLOCK%%%%%% del dataiter # %%%%%%INVISIBLE_CODE_BLOCK%%%%%%
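########################################################################
# As a short recap of the GPU notes above (not part of the original
# tutorial): the one-time ``net.to(device)`` call and the per-step
# ``inputs.to(device)`` / ``labels.to(device)`` calls combine into a
# training step like this:
#
# .. code:: python
#
#     net.to(device)
#     for inputs, labels in trainloader:
#         inputs, labels = inputs.to(device), labels.to(device)
#         optimizer.zero_grad()
#         loss = criterion(net(inputs), labels)
#         loss.backward()
#         optimizer.step()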
from collections.abc import MutableSequence import re from textwrap import dedent from ._grouping import grouping_len, map_grouping from .development.base_component import Component from . import exceptions from ._utils import patch_collections_abc, stringify_id, to_json def validate_callback(outputs, inputs, state, extra_args, types): Input, Output, State = types if extra_args: if not isinstance(extra_args[0], (Output, Input, State)): raise exceptions.IncorrectTypeException( dedent( """ Callback arguments must be `Output`, `Input`, or `State` objects, optionally wrapped in a list or tuple. We found (possibly after unwrapping a list or tuple): {} """ ).format(repr(extra_args[0])) ) raise exceptions.IncorrectTypeException( dedent( """ In a callback definition, you must provide all Outputs first, then all Inputs, then all States. After this item: {} we found this item next: {} """ ).format(repr((outputs + inputs + state)[-1]), repr(extra_args[0])) ) for args in [outputs, inputs, state]: for arg in args: validate_callback_arg(arg) def validate_callback_arg(arg): if not isinstance(getattr(arg, "component_property", None), str): raise exceptions.IncorrectTypeException( dedent( """ component_property must be a string, found {!r} """ ).format(arg.component_property) ) if hasattr(arg, "component_event"): raise exceptions.NonExistentEventException( """ Events have been removed. Use the associated property instead. """ ) if isinstance(arg.component_id, dict): validate_id_dict(arg) elif isinstance(arg.component_id, str): validate_id_string(arg) else: raise exceptions.IncorrectTypeException( dedent( """ component_id must be a string or dict, found {!r} """ ).format(arg.component_id) ) def validate_id_dict(arg): arg_id = arg.component_id for k in arg_id: # Need to keep key type validation on the Python side, since # non-string keys will be converted to strings in json.dumps and may # cause unwanted collisions if not isinstance(k, str): raise exceptions.IncorrectTypeException( dedent( """ Wildcard ID keys must be non-empty strings, found {!r} in id {!r} """ ).format(k, arg_id) ) def validate_id_string(arg): arg_id = arg.component_id invalid_chars = ".{" invalid_found = [x for x in invalid_chars if x in arg_id] if invalid_found: raise exceptions.InvalidComponentIdError( """ The element `{}` contains `{}` in its ID. Characters `{}` are not allowed in IDs. """.format( arg_id, "`, `".join(invalid_found), "`, `".join(invalid_chars) ) ) def validate_output_spec(output, output_spec, Output): """ This validation is for security and internal debugging, not for users, so the messages are not intended to be clear. `output` comes from the callback definition, `output_spec` from the request. 
""" if not isinstance(output, (list, tuple)): output, output_spec = [output], [output_spec] elif len(output) != len(output_spec): raise exceptions.CallbackException("Wrong length output_spec") for outi, speci in zip(output, output_spec): speci_list = speci if isinstance(speci, (list, tuple)) else [speci] for specij in speci_list: if not Output(specij["id"], specij["property"]) == outi: raise exceptions.CallbackException( "Output does not match callback definition" ) def validate_and_group_input_args(flat_args, arg_index_grouping): if grouping_len(arg_index_grouping) != len(flat_args): raise exceptions.CallbackException("Inputs do not match callback definition") args_grouping = map_grouping(lambda ind: flat_args[ind], arg_index_grouping) if isinstance(arg_index_grouping, dict): func_args = [] func_kwargs = args_grouping elif isinstance(arg_index_grouping, (tuple, list)): func_args = list(args_grouping) func_kwargs = {} else: # Scalar input func_args = [args_grouping] func_kwargs = {} return func_args, func_kwargs def validate_multi_return(outputs_list, output_value, callback_id): if not isinstance(output_value, (list, tuple)): raise exceptions.InvalidCallbackReturnValue( dedent( """ The callback {} is a multi-output. Expected the output type to be a list or tuple but got: {}. """ ).format(callback_id, repr(output_value)) ) if len(output_value) != len(outputs_list): raise exceptions.InvalidCallbackReturnValue( """ Invalid number of output values for {}. Expected {}, got {} """.format( callback_id, len(outputs_list), len(output_value) ) ) for i, outi in enumerate(outputs_list): if isinstance(outi, list): vi = output_value[i] if not isinstance(vi, (list, tuple)): raise exceptions.InvalidCallbackReturnValue( dedent( """ The callback {} output {} is a wildcard multi-output. Expected the output type to be a list or tuple but got: {}. output spec: {} """ ).format(callback_id, i, repr(vi), repr(outi)) ) if len(vi) != len(outi): raise exceptions.InvalidCallbackReturnValue( dedent( """ Invalid number of output values for {} item {}. Expected {}, got {} output spec: {} output value: {} """ ).format(callback_id, i, len(vi), len(outi), repr(outi), repr(vi)) ) def fail_callback_output(output_value, output): valid_children = (str, int, float, type(None), Component) valid_props = (str, int, float, type(None), tuple, MutableSequence) def _raise_invalid(bad_val, outer_val, path, index=None, toplevel=False): bad_type = type(bad_val).__name__ outer_id = ( "(id={:s})".format(outer_val.id) if getattr(outer_val, "id", False) else "" ) outer_type = type(outer_val).__name__ if toplevel: location = dedent( """ The value in question is either the only value returned, or is in the top level of the returned list, """ ) else: index_string = "[*]" if index is None else "[{:d}]".format(index) location = dedent( """ The value in question is located at {} {} {} {}, """ ).format(index_string, outer_type, outer_id, path) raise exceptions.InvalidCallbackReturnValue( dedent( """ The callback for `{output}` returned a {object:s} having type `{type}` which is not JSON serializable. {location} and has string representation `{bad_val}` In general, Dash properties can only be dash components, strings, dictionaries, numbers, None, or lists of those. 
""" ).format( output=repr(output), object="tree with one value" if not toplevel else "value", type=bad_type, location=location, bad_val=bad_val, ) ) def _valid_child(val): return isinstance(val, valid_children) def _valid_prop(val): return isinstance(val, valid_props) def _can_serialize(val): if not (_valid_child(val) or _valid_prop(val)): return False try: to_json(val) except TypeError: return False return True def _validate_value(val, index=None): # val is a Component if isinstance(val, Component): unserializable_items = [] # pylint: disable=protected-access for p, j in val._traverse_with_paths(): # check each component value in the tree if not _valid_child(j): _raise_invalid(bad_val=j, outer_val=val, path=p, index=index) if not _can_serialize(j): # collect unserializable items separately, so we can report # only the deepest level, not all the parent components that # are just unserializable because of their children. unserializable_items = [ i for i in unserializable_items if not p.startswith(i[0]) ] if unserializable_items: # we already have something unserializable in a different # branch - time to stop and fail break if all(not i[0].startswith(p) for i in unserializable_items): unserializable_items.append((p, j)) # Children that are not of type Component or # list/tuple not returned by traverse child = getattr(j, "children", None) if not isinstance(child, (tuple, MutableSequence)): if child and not _can_serialize(child): _raise_invalid( bad_val=child, outer_val=val, path=p + "\n" + "[*] " + type(child).__name__, index=index, ) if unserializable_items: p, j = unserializable_items[0] # just report the first one, even if there are multiple, # as that's how all the other errors work _raise_invalid(bad_val=j, outer_val=val, path=p, index=index) # Also check the child of val, as it will not be returned child = getattr(val, "children", None) if not isinstance(child, (tuple, MutableSequence)): if child and not _can_serialize(val): _raise_invalid( bad_val=child, outer_val=val, path=type(child).__name__, index=index, ) if not _can_serialize(val): _raise_invalid( bad_val=val, outer_val=type(val).__name__, path="", index=index, toplevel=True, ) if isinstance(output_value, list): for i, val in enumerate(output_value): _validate_value(val, index=i) else: _validate_value(output_value) # if we got this far, raise a generic JSON error raise exceptions.InvalidCallbackReturnValue( """ The callback for output `{output}` returned a value which is not JSON serializable. In general, Dash properties can only be dash components, strings, dictionaries, numbers, None, or lists of those. """.format( output=repr(output) ) ) def check_obsolete(kwargs): for key in kwargs: if key in ["components_cache_max_age", "static_folder"]: raise exceptions.ObsoleteKwargException( """ {} is no longer a valid keyword argument in Dash since v1.0. See https://dash.plotly.com for details. """.format( key ) ) # any other kwarg mimic the built-in exception raise TypeError("Dash() got an unexpected keyword argument '" + key + "'") def validate_js_path(registered_paths, package_name, path_in_package_dist): if package_name not in registered_paths: raise exceptions.DependencyException( """ Error loading dependency. "{}" is not a registered library. Registered libraries are: {} """.format( package_name, list(registered_paths.keys()) ) ) if path_in_package_dist not in registered_paths[package_name]: raise exceptions.DependencyException( """ "{}" is registered but the path requested is not valid. 
The path requested: "{}" List of registered paths: {} """.format( package_name, path_in_package_dist, registered_paths ) ) def validate_index(name, checks, index): missing = [i for check, i in checks if not re.compile(check).search(index)] if missing: plural = "s" if len(missing) > 1 else "" raise exceptions.InvalidIndexException( "Missing item{pl} {items} in {name}.".format( items=", ".join(missing), pl=plural, name=name ) ) def validate_layout_type(value): if not isinstance(value, (Component, patch_collections_abc("Callable"))): raise exceptions.NoLayoutException( "Layout must be a dash component " "or a function that returns a dash component." ) def validate_layout(layout, layout_value): if layout is None: raise exceptions.NoLayoutException( """ The layout was `None` at the time that `run_server` was called. Make sure to set the `layout` attribute of your application before running the server. """ ) layout_id = stringify_id(getattr(layout_value, "id", None)) component_ids = {layout_id} if layout_id else set() for component in layout_value._traverse(): # pylint: disable=protected-access component_id = stringify_id(getattr(component, "id", None)) if component_id and component_id in component_ids: raise exceptions.DuplicateIdError( """ Duplicate component id found in the initial layout: `{}` """.format( component_id ) ) component_ids.add(component_id)
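A minimal sketch of how validate_id_string above reports an invalid ID; the SimpleNamespace stand-in for an Input/Output/State dependency object is an assumption for illustration only:

from types import SimpleNamespace

# Hypothetical dependency object whose component_id contains a forbidden `.`
bad_arg = SimpleNamespace(component_id="my.component", component_property="value")
try:
    validate_id_string(bad_arg)
except exceptions.InvalidComponentIdError as err:
    print(err)  # reports that `.` and `{` are not allowed in component IDs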
# Extended Euclid's Algorithm for the Modular Multiplicative Inverse
def euclidean_mod_inverse(a, b):
    # Returns x such that (a * x) % b == 1; a and b must be coprime.
    temp = b
    # Initialize the coefficients tracked by the extended algorithm
    t1, t2 = 0, 1

    if b == 1:
        return 0

    # Run the extended Euclidean algorithm while a > 1
    while a > 1:
        quotient, remainder = divmod(a, b)
        a, b = b, remainder
        t1, t2 = t2 - t1 * quotient, t1

    # Normalize a negative coefficient into the range [0, b)
    if t2 < 0:
        t2 += temp

    return t2

# Driver Code
if __name__ == '__main__':
    num = 10
    mod = 17
    print(f"The Modular Multiplicative Inverse of {num} modulo {mod} is: "
          f"{euclidean_mod_inverse(num, mod)}")
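A quick cross-check of euclidean_mod_inverse against Python's built-in modular inverse (pow with exponent -1, available since Python 3.8); the helper name is ours, not part of the original module:

def verify_inverse(mod=17):
    # 17 is prime, so every a in 1..16 is coprime with it and has an inverse.
    for a in range(1, mod):
        inv = euclidean_mod_inverse(a, mod)
        assert (a * inv) % mod == 1
        assert inv == pow(a, -1, mod)  # built-in modular inverse, Python 3.8+
    print(f"all inverses modulo {mod} verified")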
from application import db

# Create your own models here and they will be imported automatically,
# or use a model per blueprint.

class User(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(80), unique=True)
    email = db.Column(db.String(80), unique=True)
    password = db.Column(db.String(80))
    auth_token = db.Column(db.String(80), nullable=False)
    auth_secret = db.Column(db.String(80), nullable=False)
    consumer_key = db.Column(db.String(80), nullable=False)
    consumer_secret = db.Column(db.String(80), nullable=False)
    twitter_handle = db.Column(db.String(80), nullable=False)
    job_status = db.Column(db.Boolean(), unique=False, default=True)

    def __init__(self, username, email, password):
        self.username = username
        self.email = email
        self.password = password
        self.auth_token = ''
        self.auth_secret = ''
        self.consumer_key = ''
        self.consumer_secret = ''
        self.twitter_handle = ''
        self.job_status = False

    def set_password(self, password):
        self.password = password

    def check_password(self, password):
        return self.password == password

    def is_authenticated(self):
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        return str(self.id)

    def __repr__(self):
        return '<User %r>' % (self.username)

class Hashtag(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    tag = db.Column(db.String(100))
    user_id = db.Column(db.Integer)

    def __init__(self, tag):
        self.tag = tag

class AlreadyFollow(db.Model):
    id = db.Column(db.Integer, primary_key=True)

class Log(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    time = db.Column(db.DateTime)
    hostname = db.Column(db.String(20))
    flagger = db.Column(db.Boolean)
    user_id = db.Column(db.Integer, db.ForeignKey('user.id'))
    user = db.relationship('User', backref='log', lazy='select')

    def __init__(self, time, uptime, hostname, flagger, user_id):
        # uptime is accepted for call compatibility but is not persisted;
        # returns/errors are plain counters, not mapped columns.
        self.returns = 0
        self.errors = 0
        self.time = time
        self.hostname = hostname
        self.flagger = flagger
        self.user_id = user_id

    def __repr__(self):
        return '<Log %r>' % (self.hostname)
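A hedged usage sketch for the User model above; it assumes an active Flask application context and that the tables have been created, and all names and credentials are placeholders:

def example_create_user():
    # Illustrative only: run inside an app context after db.create_all().
    user = User('alice', 'alice@example.com', 's3cret')
    db.session.add(user)
    db.session.commit()
    assert user.check_password('s3cret')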
from __future__ import print_function import tensorflow as tf import numpy as np from scipy import ndimage from scipy.spatial.distance import euclidean from sklearn import metrics import TensorflowUtils as utils import read_Data_list as scene_parsing import BatchDatsetReader as dataset from six.moves import xrange IMAGE_SIZE = 224 NUM_OF_CLASSESS = 2 def vgg_net(weights, image): layers = ( 'conv1_1', 'relu1_1', 'conv1_2', 'relu1_2', 'pool1', 'conv2_1', 'relu2_1', 'conv2_2', 'relu2_2', 'pool2', 'conv3_1', 'relu3_1', 'conv3_2', 'relu3_2', 'conv3_3', 'relu3_3', 'conv3_4', 'relu3_4', 'pool3', 'conv4_1', 'relu4_1', 'conv4_2', 'relu4_2', 'conv4_3', 'relu4_3', 'conv4_4', 'relu4_4', 'pool4', 'conv5_1', 'relu5_1', 'conv5_2', 'relu5_2', 'conv5_3', 'relu5_3', 'conv5_4', 'relu5_4' ) net = {} current = image for i, name in enumerate(layers): kind = name[:4] if kind == 'conv': kernels, bias = weights[i][0][0][0][0] # matconvnet: weights are [width, height, in_channels, out_channels] # tensorflow: weights are [height, width, in_channels, out_channels] kernels = utils.get_variable(np.transpose(kernels, (1, 0, 2, 3)), name=name + "_w") bias = utils.get_variable(bias.reshape(-1), name=name + "_b") current = utils.conv2d_basic(current, kernels, bias) elif kind == 'relu': current = tf.nn.relu(current, name=name) elif kind == 'pool': current = utils.avg_pool_2x2(current) net[name] = current return net def inference(image, keep_prob): """ Semantic segmentation network definition :param image: input image. Should have values in range 0-255 :param keep_prob: :return: """ print("setting up vgg initialized conv layers ...") model_data = utils.get_model_data("Model/", 'http://www.vlfeat.org/matconvnet/models/beta16/imagenet-vgg-verydeep-19.mat') mean = model_data['normalization'][0][0][0] #224*224*3 mean_pixel = np.mean(mean, axis=(0, 1)) #return a mean pixel (3,) weights = np.squeeze(model_data['layers']) processed_image = utils.process_image(image, mean_pixel) #centerlize the images with tf.variable_scope("inference"): image_net = vgg_net(weights, processed_image) conv_final_layer = image_net["conv5_4"] #change the original conv5_3 VGG_stopped = tf.stop_gradient(conv_final_layer) #stop the BP pool5 = utils.max_pool_2x2(VGG_stopped) #(1,7,7,512) W6 = utils.weight_variable([7, 7, 512, 4096], name="W6") b6 = utils.bias_variable([4096], name="b6") conv6 = utils.conv2d_basic(pool5, W6, b6) relu6 = tf.nn.relu(conv6, name="relu6") relu_dropout6 = tf.nn.dropout(relu6, keep_prob=keep_prob) #(1,7,7,4096) W7 = utils.weight_variable([1, 1, 4096, 4096], name="W7") b7 = utils.bias_variable([4096], name="b7") conv7 = utils.conv2d_basic(relu_dropout6, W7, b7) relu7 = tf.nn.relu(conv7, name="relu7") relu_dropout7 = tf.nn.dropout(relu7, keep_prob=keep_prob) W8 = utils.weight_variable([1, 1, 4096, NUM_OF_CLASSESS], name="W8") b8 = utils.bias_variable([NUM_OF_CLASSESS], name="b8") conv8 = utils.conv2d_basic(relu_dropout7, W8, b8) # annotation_pred1 = tf.argmax(conv8, dimension=3, name="prediction1") # now to upscale to actual image size deconv_shape1 = image_net["pool4"].get_shape() W_t1 = utils.weight_variable([4, 4, deconv_shape1[3].value, NUM_OF_CLASSESS], name="W_t1") b_t1 = utils.bias_variable([deconv_shape1[3].value], name="b_t1") conv_t1 = utils.conv2d_transpose_strided(conv8, W_t1, b_t1, output_shape=tf.shape(image_net["pool4"])) fuse_1 = tf.add(conv_t1, image_net["pool4"], name="fuse_1") deconv_shape2 = image_net["pool3"].get_shape() W_t2 = utils.weight_variable([4, 4, deconv_shape2[3].value, deconv_shape1[3].value], 
name="W_t2") b_t2 = utils.bias_variable([deconv_shape2[3].value], name="b_t2") conv_t2 = utils.conv2d_transpose_strided(fuse_1, W_t2, b_t2, output_shape=tf.shape(image_net["pool3"])) fuse_2 = tf.add(conv_t2, image_net["pool3"], name="fuse_2") shape = tf.shape(image) deconv_shape3 = tf.stack([shape[0], shape[1], shape[2], NUM_OF_CLASSESS]) W_t3 = utils.weight_variable([16, 16, NUM_OF_CLASSESS, deconv_shape2[3].value], name="W_t3") b_t3 = utils.bias_variable([NUM_OF_CLASSESS], name="b_t3") conv_t3 = utils.conv2d_transpose_strided(fuse_2, W_t3, b_t3, output_shape=deconv_shape3, stride=8) annotation_pred = tf.argmax(conv_t3, dimension=3, name="prediction") return annotation_pred, conv_t3 def main(argv=None): keep_probability = tf.placeholder(tf.float32, name="keep_probabilty") image = tf.placeholder(tf.float32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3], name="input_image") annotation = tf.placeholder(tf.int32, shape=[None, IMAGE_SIZE, IMAGE_SIZE, 1], name="annotation") r_img = np.random.randint(0,255,(1,IMAGE_SIZE,IMAGE_SIZE,3)) r_anno = np.random.randint(0,255,(1,IMAGE_SIZE,IMAGE_SIZE,1)) pred_annotation, logits = inference(image, keep_probability) labels=tf.squeeze(annotation, squeeze_dims=[3]) with tf.Session() as sess: sess.run(tf.global_variables_initializer()) logit= sess.run(logits, feed_dict = {image:r_img,keep_probability:1.0}) print(np.shape(logit)) label = sess.run(labels, feed_dict = {annotation:r_anno}) print(np.shape(label)) if __name__ == "__main__": tf.app.run()
from awscrt import io, mqtt, auth, http from awsiot import mqtt_connection_builder import json class MQTTManager: """Keeps the connections to MQTT, used for sending and receiving messages.""" mqtt_connection = None def __init__(self, cert_path: str, key_path: str, root_path: str, port: int, client_id: str, server: str): """ constructor. """ self.cert_path = cert_path self.key_path = key_path self.root_path = root_path self.port = port self.client_id = client_id self.server = server def __str__(self): """prints the object.""" return "MQTT Manager" def connect(self): """connects to the default values given.""" event_loop_group = io.EventLoopGroup(1) host_resolver = io.DefaultHostResolver(event_loop_group) client_bootstrap = io.ClientBootstrap(event_loop_group, host_resolver) self.mqtt_connection = mqtt_connection_builder.mtls_from_path( # check https://aws.github.io/aws-iot-device-sdk-python-v2/awsiot/mqtt_connection_builder.html client_bootstrap=client_bootstrap, # variable build above cert_filepath=self.cert_path, pri_key_filepath=self.key_path, ca_filepath=self.root_path, #port=self.port, client_id=self.client_id, endpoint=self.server, # AWS IoT Core custom endpoint URL clean_session=False, keep_alive_secs=60 ) print("Connecting to {} with client ID '{}'...".format( self.server, self.client_id)) # Make the connect() call connect_future = self.mqtt_connection.connect() # Future.result() waits until a result is available res = connect_future.result() print(res) print("Connected!") def disconnect(self): disconnect_future = self.mqtt_connection.disconnect() disconnect_future.result() def on_message(self, callback): """callback should take the following arguments and return nothing: topic (str): Topic receiving message. payload (bytes): Payload of message. dup (bool): DUP flag. If True, this might be re-delivery of an earlier attempt to send the message. qos (QoS): Quality of Service used to deliver the message. retain (bool): Retain flag. If True, the message was sent as a result of a new subscription being made by the client. **kwargs (dict): Forward-compatibility kwargs.""" self.mqtt_connection.on_message(callback) def add_topic(self, topic, on_message_received=None): """adds a topic to the mqtt client.""" print("Subscribing to topic '{}'...".format(topic)) subscribe_future, packet_id = self.mqtt_connection.subscribe( topic=topic, qos=mqtt.QoS.AT_LEAST_ONCE, callback=on_message_received) subscribe_result = subscribe_future.result() print("Subscribed with {}".format(str(subscribe_result['qos']))) def send_msg(self, topic, msg): """sends a message on the topic.""" print("Sending MQTT message: " + json.dumps(msg)) self.mqtt_connection.publish(topic=topic, payload=json.dumps(msg), qos=mqtt.QoS.AT_LEAST_ONCE) print("Published: '" + json.dumps(msg) + "' to the topic: " + topic)
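A usage sketch for MQTTManager above; all paths, the endpoint and the topic are placeholder values rather than anything from the original module:

def example_usage():
    manager = MQTTManager(
        cert_path="certs/device.pem.crt",        # placeholder
        key_path="certs/private.pem.key",        # placeholder
        root_path="certs/AmazonRootCA1.pem",     # placeholder
        port=8883,
        client_id="demo-client",
        server="example-ats.iot.us-east-1.amazonaws.com")  # placeholder endpoint
    manager.connect()

    # Callback signature matches the one documented in on_message above.
    def on_msg(topic, payload, dup, qos, retain, **kwargs):
        print("received on {}: {}".format(topic, payload))

    manager.add_topic("demo/topic", on_message_received=on_msg)
    manager.send_msg("demo/topic", {"hello": "world"})
    manager.disconnect()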
import logging
import re

DOC_PACKAGE_FILENAME = "C:/github/azure-docs-sdk-java/package.json"

def main():
    logging.basicConfig(level=logging.INFO)

    with open(DOC_PACKAGE_FILENAME, 'r', encoding='utf-8') as f:
        lines = f.readlines()

    exclude_package_defined = False
    implementation_package = None
    out_lines = []
    for line in lines:
        line_strip = line.strip()
        if line_strip == '{':
            # start of a package entry
            exclude_package_defined = False
            implementation_package = None
        elif line_strip == '},' or line_strip == "}":
            # end of a package entry: add "excludepackages" if it was missing
            if not exclude_package_defined and implementation_package:
                previous_line_no = len(out_lines) - 1
                out_lines[previous_line_no] = out_lines[previous_line_no].rstrip() + ',\n'
                out_lines.append(f'    "excludepackages": "{implementation_package}"\n')
        elif line_strip.startswith('"excludepackages": '):
            exclude_package_defined = True
        elif line_strip.startswith('"packageArtifactId": '):
            artifact_id = re.match('"packageArtifactId": "(.*)",?', line_strip).group(1)
            if artifact_id.startswith('azure-resourcemanager-'):
                # str.removeprefix requires Python 3.9+
                package_suffix = artifact_id.removeprefix('azure-resourcemanager-')
                implementation_package = f'com.azure.resourcemanager.{package_suffix}.implementation'
        out_lines.append(line)

    with open(DOC_PACKAGE_FILENAME, 'w', encoding='utf-8') as f:
        f.write(''.join(out_lines))

if __name__ == '__main__':
    main()
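For illustration, the edit the script makes to a single package.json entry (the artifact ID below is hypothetical):

# Before:
#     {
#         "packageArtifactId": "azure-resourcemanager-appservice"
#     },
#
# After:
#     {
#         "packageArtifactId": "azure-resourcemanager-appservice",
#         "excludepackages": "com.azure.resourcemanager.appservice.implementation"
#     },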
import base64 import json import traceback from typing import Any, List, Dict from opus import Asset from util.event_emitter import EventEmitter import websockets from websockets.server import WebSocketServerProtocol class ComponentPeer(EventEmitter): """ Base class for peers which accepts actions. Parameters ---------- target_types Target types this peer listens too (e.g. audio, video, image etc.) """ def __init__(self, target_types: List[str]): super().__init__() self._target_types = target_types self._websockets: List[WebSocketServerProtocol] = [] self._assets: List[Asset] = [] def add_asset(self, asset: Asset): self._assets.append(asset) async def handle_socket(self, websocket: WebSocketServerProtocol, initial_message: Any): """This handles one websocket connection.""" self._websockets.append(websocket) hashes = initial_message["files"] for asset in self._assets: if asset.data and hashes.get(asset.path) == asset.checksum: print(f"Asset {asset.path} already up to date") elif asset.data: print(f"Syncing asset {asset.path}") await websocket.send(json.dumps({"command": "file", "path": asset.path, "data": base64.b64encode(asset.data).decode("utf-8")})) else: print(f"Skipping sync of asset {asset.path} (no data)") # Handle messages from the client try: async for message in websocket: try: message_dict = json.loads(message) message_type = message_dict["messageType"] if message_type == "heartbeat": pass # Ignore heartbeats for now else: self.handle_component_message(message_type, message_dict) except Exception as e: print(f"Failed to handle component message. Got error {e}") traceback.print_exc() except websockets.exceptions.ConnectionClosedError as cce: print(f"Websocket to component closed abruptly: {cce}") # Websocket is closed self._websockets.remove(websocket) def nof_instances(self): return len(self._websockets) def send_to_all(self, data): websockets.broadcast(self._websockets, json.dumps(data)) def handles_target(self, target_type: str): """Checks whether this instance can handle actions of the given type.""" return target_type in self._target_types def handle_component_message(self, message_type: str, message: object): print(f"WARNING: Unknown message type {message_type}") def handle_action(self, target_type: str, cmd: str, assets: List[Asset], params: Dict[str, Any]): """Process the given action.""" print(f"Command not handled by subclass: {target_type}:{cmd}")
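Since ComponentPeer above is documented as a base class, here is a minimal subclass sketch; the target types and the "log" message type are illustrative assumptions, not part of the original module:

class ScreenPeer(ComponentPeer):
    """Hypothetical peer that handles actions targeting on-screen media."""

    def __init__(self):
        super().__init__(["image", "video"])

    def handle_component_message(self, message_type: str, message: object):
        # Handle a made-up "log" message; defer everything else to the base
        # class, which prints a warning for unknown types.
        if message_type == "log":
            print(f"component log: {message.get('text')}")
        else:
            super().handle_component_message(message_type, message)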
#!/usr/bin/env python """ Copyright (c) 2016 Pimoroni Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """ try: from setuptools import setup except ImportError: from distutils.core import setup classifiers = ['Development Status :: 5 - Production/Stable', 'Operating System :: POSIX :: Linux', 'License :: OSI Approved :: MIT License', 'Intended Audience :: Developers', 'Programming Language :: Python :: 2.6', 'Programming Language :: Python :: 2.7', 'Programming Language :: Python :: 3', 'Topic :: Software Development', 'Topic :: System :: Hardware'] setup( name='i2cdevice', version='0.0.6', author='Philip Howard', author_email='phil@pimoroni.com', description="""Python DSL for interacting with SMBus-compatible i2c devices""", long_description=open('README.rst').read() + "\n" + open('CHANGELOG.txt').read(), license='MIT', keywords='Raspberry Pi, SMBUS, i2c', url='http://www.pimoroni.com', project_urls={'GitHub': 'https://www.github.com/pimoroni/i2cdevice-python'}, classifiers=classifiers, packages=['i2cdevice'], install_requires=[] )
from django.test import TestCase
from django.contrib.auth import get_user_model


class CustomUserTests(TestCase):

    def test_create_user(self):
        User = get_user_model()
        user = User.objects.create_user(
            username='Nelg',
            email='nelg@liamg.com',
            password='testpass123',
        )
        self.assertEqual(user.username, 'Nelg')
        self.assertEqual(user.email, 'nelg@liamg.com')
        self.assertTrue(user.is_active)
        self.assertFalse(user.is_staff)
        self.assertFalse(user.is_superuser)

    def test_create_superuser(self):
        User = get_user_model()
        admin_user = User.objects.create_superuser(
            username='Superman',
            email='superman@gmail.com',
            password='testpass123',
        )
        self.assertEqual(admin_user.username, 'Superman')
        self.assertEqual(admin_user.email, 'superman@gmail.com')
        self.assertTrue(admin_user.is_active)
        self.assertTrue(admin_user.is_staff)
        self.assertTrue(admin_user.is_superuser)
class Solution(object):
    # Assumes LeetCode's TreeNode with .val, .left and .right attributes.
    def helperFunction(self, root, result):
        # In-order: left subtree, then the node value, then the right subtree.
        if root:
            self.helperFunction(root.left, result)
            result.append(root.val)
            self.helperFunction(root.right, result)

    def inorderTraversal(self, root):
        """
        :type root: TreeNode
        :rtype: List[int]
        """
        result = []
        self.helperFunction(root, result)
        return result
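# A quick self-contained check of the traversal above; this TreeNode class is
# an assumption matching LeetCode's usual definition, not part of the original.
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


# Tree:    2
#         / \
#        1   3    ->  in-order traversal yields [1, 2, 3]
root = TreeNode(2, TreeNode(1), TreeNode(3))
print(Solution().inorderTraversal(root))  # [1, 2, 3]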
#!/usr/bin/env python3 from test_framework.test_framework import earn_save_invest_repeatTestFramework from test_framework.util import * """ Simple test checking chain movement after v5 enforcement. """ class MiningV5UpgradeTest(earn_save_invest_repeatTestFramework): def set_test_params(self): self.num_nodes = 1 self.extra_args = [[]] self.setup_clean_chain = True def run_test(self): assert_equal(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['status'], 'pending') self.nodes[0].generate(300) # v5 activation height assert_equal(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['status'], 'active') self.nodes[0].generate(25) # 25 more to check chain movement assert_equal(self.nodes[0].getblockchaininfo()['upgrades']['v5 shield']['status'], 'active') assert_equal(self.nodes[0].getblockcount(), 325) if __name__ == '__main__': MiningV5UpgradeTest().main()
# Sum of |A[i] - A[j]| over all pairs i < j; sorting first lets every pair be
# counted as (larger - smaller) without absolute values.
N, *A = map(int, open(0).read().split())
A.sort()
s = sum(A)  # running sum of the elements not yet consumed
result = 0
for i in range(N):
    a = A[i]
    s -= a  # s is now the sum of A[i+1:]
    # every element after index i is >= a, so each contributes (A[j] - a)
    result += s - a * (N - i - 1)
print(result)
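# Worked example (chosen for illustration): A = [1, 3, 6] has pairwise
# differences (3-1) + (6-1) + (6-3) = 10, which matches the loop:
#   i=0: a=1, s=9, result += 9 - 1*2 = 7
#   i=1: a=3, s=6, result += 6 - 3*1 -> 10
#   i=2: a=6, s=0, result += 0 - 6*0 -> 10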
# Generated by Django 2.1.7 on 2019-04-30 13:39 import diventi.accounts.models from django.db import migrations class Migration(migrations.Migration): dependencies = [ ('accounts', '0112_auto_20190430_1539'), ] operations = [ migrations.AlterModelManagers( name='diventiuser', managers=[ ('objects', diventi.accounts.models.DiventiUserManager()), ], ), ]
#!/usr/bin/env python
# encoding: utf-8
'''
@author: caroline
@license: (C) Copyright 2019-2022, Node Supply Chain Manager Corporation Limited.
@contact: caroline.fang.cc@gmail.com
@software: pycharm
@file: zjk2_transfer.py
@time: 2020/1/1 4:59 PM
@desc:
'''
import json
import logging
import threading
from datetime import datetime
from time import sleep

import requests
import random
import time
import xlrd
from requests.adapters import HTTPAdapter
from urllib3 import Retry


class myThread(threading.Thread):
    def __init__(self, threadID, rootAccount, account):
        threading.Thread.__init__(self)
        self.threadID = threadID
        self.rootAccount = rootAccount
        self.account = account

    def run(self):
        logging.info("Starting thread: " + str(self.threadID))
        print("Starting thread: " + str(self.threadID))
        # Each account performs 120 transfers.
        api = Api(self.rootAccount)
        api.times_of_120(self.account)
        # sleep(600) / the sleep(30) in times_of_120 works out to 20 transfers per minute
        print("Exiting thread: " + str(self.threadID))
        logging.info("Exiting thread: " + str(self.threadID))


class Api():
    def __init__(self, rootAccount):
        # Pick a random root account as the sender.
        i = random.randint(0, len(rootAccount) - 1)
        self.rootAccount = rootAccount[i]

    # Create an account.
    def creat_account(self):
        # url = "http://47.75.98.179:15645"
        url = "http://39.98.39.224:35645"
        payload = "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"account_createAccount\",\n\t\"params\":[], \n\t\"id\": 3\n\t\n}"
        headers = {
            'Content-Type': "application/json",
            'cache-control': "no-cache",
            'Postman-Token': "27a29181-18f4-4549-80c2-d23196a7df15"
        }
        try:
            response = requests.request("POST", url, data=payload, headers=headers)
            logging.info("creat_account_response:{}".format(response.text))
            print("creat_account_response:{}".format(response.text))
            jsonDic = json.loads(response.text)
            print("Created account: " + jsonDic['result'])
            return jsonDic['result']
        except Exception as e:
            logging.error("HTTP error {}".format(e))
            print("HTTP error {}".format(e))
            return "error Account", e

    # Send a transaction. 0xaD3dC2D8aedef155eabA42Ab72C1FE480699336c
    def privkey_api(self, count, rootAccount, recivice):
        logging.info("Sender: {} --> receiver: {}".format(rootAccount, recivice))
        print("Sender: {} --> receiver: {}".format(rootAccount, recivice))
        # url = "http://47.75.98.179:15645"
        url = "http://39.98.39.224:35645"
        payload = "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"account_transfer\",\n\t\"params\":[\"0xaD3dC2D8aedef155eabA42Ab72C1FE480699336c\",\"0xde4541def39ca2393d159f6f407d225dfb653c22\",\"0x16800000000\",\"0x110\",\"0x30000\",\"\"],\n\t\"id\":1\n\t\n}"
        jsonDic = json.loads(payload)
        # print(jsonDic)
        jsonDic["params"][0] = rootAccount
        jsonDic["params"][1] = recivice
        # price = ["0x168000000000000", "0x18800000000", "0x16600000000", '0x1580000000', "0x368000000000",
        #          "0x66800000000"]
        price = ["0x64"]
        jsonDic["params"][2] = price[random.randint(0, len(price) - 1)]
        payload = json.dumps(jsonDic)
        logging.info("recivice_account: {} -- data: {}".format(recivice, payload))
        headers = {
            'Content-Type': "application/json",
            'cache-control': "no-cache",
            'Postman-Token': "6616a7d2-3705-4d37-b779-ef595abde465",
            'Connection': "close"
        }
        try:
            requests.adapters.DEFAULT_RETRIES = 5
            session = requests.Session()
            session.keep_alive = False
            retry = Retry(connect=5, backoff_factor=0.5)
            adapter = HTTPAdapter(max_retries=retry)
            session.mount('http://', adapter)
            session.mount('https://', adapter)
            response = requests.request("POST", url, data=payload, headers=headers)
            logging.info('privkey_response: {}'.format(response.text))
            print('privkey_response: {}'.format(response.text))
            jsonDic = json.loads(response.text)
            return jsonDic
        except Exception as e:
            logging.error("API error {}".format(e))
            return e

    # Returns -1 if the API call fails.
    def check_block(self):
        url = "http://39.98.39.224:35645"
        payload = "{\n\t\"jsonrpc\":\"2.0\",\n\t\"method\":\"chain_getMaxHeight\",\n\t\"params\":[],\n\t\"id\":1\n\t\n}"
        headers = {
            'Content-Type': "application/json",
            'cache-control': "no-cache",
            'Postman-Token': "d3e4e378-5f3f-4318-afb7-88842840f014"
        }
        try:
            response = requests.request("POST", url, data=payload, headers=headers)
            logging.info("Current API block height: {}".format(response.text))
            print("Current API block height: {}".format(response.text))
            jsonDic = json.loads(response.text)  # parse the JSON into a Python dict
            logging.info("Parsed JSON into a Python dict: {}".format(jsonDic))
            block_hight = jsonDic["result"]  # current block height from the response
            return block_hight
        except Exception as e:
            logging.error("Error getting block height: {}".format(e))
            print(e)
            return -1

    # Run the transfer function 120 times.
    def times_of_120(self, account):
        count = 1
        while True:
            # Single run:
            # if count > 120:
            #     break
            # Run forever:
            if count > 120:
                count = 1
                sleep(3)
                continue
            try:
                global run, all_count
                if run == 1:
                    self.privkey_api(count, self.rootAccount, account)
                    all_count = all_count + 1
                    logging.info(
                        datetime.now().strftime("%Y-%m-%d %H:%M:%S") + " - transaction sent (receiving account): " + account + " - total transactions: {}".format(
                            all_count))
                    # sleep(6)  # 60
                else:
                    logging.info('Transactions paused')
                    # sleep(3)  # 30
            except Exception as e:
                print("Transaction failed! {}".format(e))
                logging.error("Transaction failed! {}".format(e))
            count = count + 1
            logging.info('Sleeping 30s after one transaction')
            print('Sleeping 30s after one transaction')
            # sleep(30)  # sleep(600) / sleep(30) works out to 20 transfers per minute


# Run with `peoples` independent accounts, each sending 120 transactions.
def account_100_run_pay_120(peoples=100):
    global rootAccount
    api = Api(rootAccount)
    for i in range(peoples):
        try:
            account = api.creat_account()
            # One thread per user, independently running 120 transactions.
            logging.info("Created user {}: ".format(i) + account)
            print("Created user {}: ".format(i) + account)
            threadX = myThread(i, rootAccount, account)
            threadX.start()
            # sleep(2)
        except Exception as e:
            print("Account creation failed! {}".format(e))
            logging.error("Account creation failed! {}".format(e))
            sleep(3)


# Check the chain height every n seconds; pause transfers while it stalls.
def timer(n):
    global run, height, rootAccount
    api = Api(rootAccount)
    while True:
        account_100_run_pay_120(100000)
        heightNew = api.check_block()
        localtime = time.asctime(time.localtime(time.time()))  # local time
        if height == heightNew:
            run = 0
            logging.error("Height unchanged at {}, pausing transactions".format(heightNew))
        else:
            height = heightNew
            run = 1
        time.sleep(n)
        print(run, height)
        print("Local time: {}, block height: {}, run: {}".format(localtime, heightNew, run))
        logging.info("Local time: {}, block height: {}, run: {}".format(localtime, heightNew, run))


def open_excel(file='test.xls'):
    '''Open the workbook.'''
    try:
        data = xlrd.open_workbook(file)
        return data
    except Exception as e:
        print(e)


def excel_table_byindex(file='test.xls', colnameindex=0, by_index=0):
    '''
    Read rows from an Excel sheet by index.
    :param file: path to the Excel file
    :param colnameindex: index of the header row
    :param by_index: index of the sheet
    :return: the rows of the sheet as a list of dicts
    '''
    data = open_excel(file)
    table = data.sheets()[by_index]
    nrows = table.nrows  # number of rows
    # ncols = table.ncols  # number of columns
    colnames = table.row_values(colnameindex)  # header row
    list = []
    for rownum in range(1, nrows):
        row = table.row_values(rownum)
        if row:
            app = {}
            for i in range(len(colnames)):
                app[colnames[i]] = row[i]
            list.append(app)
    return list


def main():
    '''
    Create new accounts, read the addresses from the sheet, and send
    transactions to the newly created accounts.
    :return: the addresses from the sheet
    '''
    tables = excel_table_byindex()
    print("Rows in the sheet: {}".format(tables))
    root_account_list = []
    for row in tables:
        # The column header "余额不为零的账号" means "accounts with a non-zero balance".
        print("Account:", row["余额不为零的账号"])
        root_account_list.append(row["余额不为零的账号"])
        # unlockAccount("account_unlockAccount", row["余额不为零的账号"])  # unlock the addresses read from the sheet
    return root_account_list


run = 1
height = 0
all_count = 0
# rootAccount = ["0xaD3dC2D8aedef155eabA42Ab72C1FE480699336c"]
rootAccount = main()
timer(10)
# coding: utf-8 # Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved. # This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license. from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401 from oci.decorators import init_model_state_from_kwargs @init_model_state_from_kwargs class HeatWaveClusterMemoryEstimate(object): """ HeatWave cluster memory estimate that can be used to determine a suitable HeatWave cluster size. For each MySQL user table the estimated memory footprint when the table is loaded to the HeatWave cluster memory is returned. """ #: A constant which can be used with the status property of a HeatWaveClusterMemoryEstimate. #: This constant has a value of "ACCEPTED" STATUS_ACCEPTED = "ACCEPTED" #: A constant which can be used with the status property of a HeatWaveClusterMemoryEstimate. #: This constant has a value of "IN_PROGRESS" STATUS_IN_PROGRESS = "IN_PROGRESS" #: A constant which can be used with the status property of a HeatWaveClusterMemoryEstimate. #: This constant has a value of "FAILED" STATUS_FAILED = "FAILED" #: A constant which can be used with the status property of a HeatWaveClusterMemoryEstimate. #: This constant has a value of "SUCCEEDED" STATUS_SUCCEEDED = "SUCCEEDED" #: A constant which can be used with the status property of a HeatWaveClusterMemoryEstimate. #: This constant has a value of "CANCELING" STATUS_CANCELING = "CANCELING" #: A constant which can be used with the status property of a HeatWaveClusterMemoryEstimate. #: This constant has a value of "CANCELED" STATUS_CANCELED = "CANCELED" def __init__(self, **kwargs): """ Initializes a new HeatWaveClusterMemoryEstimate object with values from keyword arguments. The following keyword arguments are supported (corresponding to the getters/setters of this class): :param db_system_id: The value to assign to the db_system_id property of this HeatWaveClusterMemoryEstimate. :type db_system_id: str :param status: The value to assign to the status property of this HeatWaveClusterMemoryEstimate. Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED", 'UNKNOWN_ENUM_VALUE'. Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'. :type status: str :param time_created: The value to assign to the time_created property of this HeatWaveClusterMemoryEstimate. :type time_created: datetime :param time_updated: The value to assign to the time_updated property of this HeatWaveClusterMemoryEstimate. :type time_updated: datetime :param table_schemas: The value to assign to the table_schemas property of this HeatWaveClusterMemoryEstimate. 
:type table_schemas: list[oci.mysql.models.HeatWaveClusterSchemaMemoryEstimate]

        """
        self.swagger_types = {
            'db_system_id': 'str',
            'status': 'str',
            'time_created': 'datetime',
            'time_updated': 'datetime',
            'table_schemas': 'list[HeatWaveClusterSchemaMemoryEstimate]'
        }

        self.attribute_map = {
            'db_system_id': 'dbSystemId',
            'status': 'status',
            'time_created': 'timeCreated',
            'time_updated': 'timeUpdated',
            'table_schemas': 'tableSchemas'
        }

        self._db_system_id = None
        self._status = None
        self._time_created = None
        self._time_updated = None
        self._table_schemas = None

    @property
    def db_system_id(self):
        """
        **[Required]** Gets the db_system_id of this HeatWaveClusterMemoryEstimate.
        The OCID of the DB System the HeatWave cluster memory estimate is associated with.


        :return: The db_system_id of this HeatWaveClusterMemoryEstimate.
        :rtype: str
        """
        return self._db_system_id

    @db_system_id.setter
    def db_system_id(self, db_system_id):
        """
        Sets the db_system_id of this HeatWaveClusterMemoryEstimate.
        The OCID of the DB System the HeatWave cluster memory estimate is associated with.


        :param db_system_id: The db_system_id of this HeatWaveClusterMemoryEstimate.
        :type: str
        """
        self._db_system_id = db_system_id

    @property
    def status(self):
        """
        **[Required]** Gets the status of this HeatWaveClusterMemoryEstimate.
        Current status of the Work Request generating the HeatWave cluster memory estimate.

        Allowed values for this property are: "ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED", 'UNKNOWN_ENUM_VALUE'.
        Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.


        :return: The status of this HeatWaveClusterMemoryEstimate.
        :rtype: str
        """
        return self._status

    @status.setter
    def status(self, status):
        """
        Sets the status of this HeatWaveClusterMemoryEstimate.
        Current status of the Work Request generating the HeatWave cluster memory estimate.


        :param status: The status of this HeatWaveClusterMemoryEstimate.
        :type: str
        """
        allowed_values = ["ACCEPTED", "IN_PROGRESS", "FAILED", "SUCCEEDED", "CANCELING", "CANCELED"]
        if not value_allowed_none_or_none_sentinel(status, allowed_values):
            status = 'UNKNOWN_ENUM_VALUE'
        self._status = status

    @property
    def time_created(self):
        """
        **[Required]** Gets the time_created of this HeatWaveClusterMemoryEstimate.
        The date and time that the Work Request to generate the HeatWave cluster memory estimate was issued, as described by `RFC 3339`__.

        __ https://tools.ietf.org/rfc/rfc3339


        :return: The time_created of this HeatWaveClusterMemoryEstimate.
        :rtype: datetime
        """
        return self._time_created

    @time_created.setter
    def time_created(self, time_created):
        """
        Sets the time_created of this HeatWaveClusterMemoryEstimate.
        The date and time that the Work Request to generate the HeatWave cluster memory estimate was issued, as described by `RFC 3339`__.

        __ https://tools.ietf.org/rfc/rfc3339


        :param time_created: The time_created of this HeatWaveClusterMemoryEstimate.
        :type: datetime
        """
        self._time_created = time_created

    @property
    def time_updated(self):
        """
        **[Required]** Gets the time_updated of this HeatWaveClusterMemoryEstimate.
        The date and time that the HeatWave cluster memory estimate was generated, as described by `RFC 3339`__.

        __ https://tools.ietf.org/rfc/rfc3339


        :return: The time_updated of this HeatWaveClusterMemoryEstimate.
        :rtype: datetime
        """
        return self._time_updated

    @time_updated.setter
    def time_updated(self, time_updated):
        """
        Sets the time_updated of this HeatWaveClusterMemoryEstimate.
        The date and time that the HeatWave cluster memory estimate was generated, as described by `RFC 3339`__.

        __ https://tools.ietf.org/rfc/rfc3339


        :param time_updated: The time_updated of this HeatWaveClusterMemoryEstimate.
        :type: datetime
        """
        self._time_updated = time_updated

    @property
    def table_schemas(self):
        """
        **[Required]** Gets the table_schemas of this HeatWaveClusterMemoryEstimate.
        Collection of schemas with estimated memory footprints for MySQL user tables of each schema when loaded to HeatWave cluster memory.


        :return: The table_schemas of this HeatWaveClusterMemoryEstimate.
        :rtype: list[oci.mysql.models.HeatWaveClusterSchemaMemoryEstimate]
        """
        return self._table_schemas

    @table_schemas.setter
    def table_schemas(self, table_schemas):
        """
        Sets the table_schemas of this HeatWaveClusterMemoryEstimate.
        Collection of schemas with estimated memory footprints for MySQL user tables of each schema when loaded to HeatWave cluster memory.


        :param table_schemas: The table_schemas of this HeatWaveClusterMemoryEstimate.
        :type: list[oci.mysql.models.HeatWaveClusterSchemaMemoryEstimate]
        """
        self._table_schemas = table_schemas

    def __repr__(self):
        return formatted_flat_dict(self)

    def __eq__(self, other):
        if other is None:
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
# -*- coding: utf-8 -*- # Copyright (c) 2019, Ben Glazier and contributors # For license information, please see license.txt from frappe.model.document import Document class OnlineSellingItem(Document): pass
# This file was automatically generated by SWIG (http://www.swig.org). # Version 1.3.31 # # Don't modify this file, modify the SWIG interface instead. # This file is compatible with both classic and new-style classes. import _cchardet import new new_instancemethod = new.instancemethod try: _swig_property = property except NameError: pass # Python < 2.2 doesn't have 'property'. def _swig_setattr_nondynamic(self,class_type,name,value,static=1): if (name == "thisown"): return self.this.own(value) if (name == "this"): if type(value).__name__ == 'PySwigObject': self.__dict__[name] = value return method = class_type.__swig_setmethods__.get(name,None) if method: return method(self,value) if (not static) or hasattr(self,name): self.__dict__[name] = value else: raise AttributeError("You cannot add attributes to %s" % self) def _swig_setattr(self,class_type,name,value): return _swig_setattr_nondynamic(self,class_type,name,value,0) def _swig_getattr(self,class_type,name): if (name == "thisown"): return self.this.own() method = class_type.__swig_getmethods__.get(name,None) if method: return method(self) raise AttributeError,name def _swig_repr(self): try: strthis = "proxy of " + self.this.__repr__() except: strthis = "" return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,) import types try: _object = types.ObjectType _newclass = 1 except AttributeError: class _object : pass _newclass = 0 del types CHARDET_RESULT_OK = _cchardet.CHARDET_RESULT_OK CHARDET_RESULT_NOMEMORY = _cchardet.CHARDET_RESULT_NOMEMORY CHARDET_RESULT_INVALID_DETECTOR = _cchardet.CHARDET_RESULT_INVALID_DETECTOR CHARDET_ENCODING_ISO_2022_JP = _cchardet.CHARDET_ENCODING_ISO_2022_JP CHARDET_ENCODING_ISO_2022_CN = _cchardet.CHARDET_ENCODING_ISO_2022_CN CHARDET_ENCODING_ISO_2022_KR = _cchardet.CHARDET_ENCODING_ISO_2022_KR CHARDET_ENCODING_ISO_8859_5 = _cchardet.CHARDET_ENCODING_ISO_8859_5 CHARDET_ENCODING_ISO_8859_7 = _cchardet.CHARDET_ENCODING_ISO_8859_7 CHARDET_ENCODING_ISO_8859_8 = _cchardet.CHARDET_ENCODING_ISO_8859_8 CHARDET_ENCODING_BIG5 = _cchardet.CHARDET_ENCODING_BIG5 CHARDET_ENCODING_GB18030 = _cchardet.CHARDET_ENCODING_GB18030 CHARDET_ENCODING_EUC_JP = _cchardet.CHARDET_ENCODING_EUC_JP CHARDET_ENCODING_EUC_KR = _cchardet.CHARDET_ENCODING_EUC_KR CHARDET_ENCODING_EUC_TW = _cchardet.CHARDET_ENCODING_EUC_TW CHARDET_ENCODING_SHIFT_JIS = _cchardet.CHARDET_ENCODING_SHIFT_JIS CHARDET_ENCODING_IBM855 = _cchardet.CHARDET_ENCODING_IBM855 CHARDET_ENCODING_IBM866 = _cchardet.CHARDET_ENCODING_IBM866 CHARDET_ENCODING_KOI8_R = _cchardet.CHARDET_ENCODING_KOI8_R CHARDET_ENCODING_MACCYRILLIC = _cchardet.CHARDET_ENCODING_MACCYRILLIC CHARDET_ENCODING_WINDOWS_1251 = _cchardet.CHARDET_ENCODING_WINDOWS_1251 CHARDET_ENCODING_WINDOWS_1252 = _cchardet.CHARDET_ENCODING_WINDOWS_1252 CHARDET_ENCODING_WINDOWS_1253 = _cchardet.CHARDET_ENCODING_WINDOWS_1253 CHARDET_ENCODING_WINDOWS_1255 = _cchardet.CHARDET_ENCODING_WINDOWS_1255 CHARDET_ENCODING_UTF_8 = _cchardet.CHARDET_ENCODING_UTF_8 CHARDET_ENCODING_UTF_16BE = _cchardet.CHARDET_ENCODING_UTF_16BE CHARDET_ENCODING_UTF_16LE = _cchardet.CHARDET_ENCODING_UTF_16LE CHARDET_ENCODING_UTF_32BE = _cchardet.CHARDET_ENCODING_UTF_32BE CHARDET_ENCODING_UTF_32LE = _cchardet.CHARDET_ENCODING_UTF_32LE CHARDET_ENCODING_HZ_GB_2312 = _cchardet.CHARDET_ENCODING_HZ_GB_2312 CHARDET_ENCODING_X_ISO_10646_UCS_4_3412 = _cchardet.CHARDET_ENCODING_X_ISO_10646_UCS_4_3412 CHARDET_ENCODING_X_ISO_10646_UCS_4_2143 = _cchardet.CHARDET_ENCODING_X_ISO_10646_UCS_4_2143 CHARDET_ENCODING_ISO_8859_2 = 
_cchardet.CHARDET_ENCODING_ISO_8859_2 CHARDET_ENCODING_WINDOWS_1250 = _cchardet.CHARDET_ENCODING_WINDOWS_1250 CHARDET_ENCODING_TIS_620 = _cchardet.CHARDET_ENCODING_TIS_620 chardet_create = _cchardet.chardet_create chardet_destroy = _cchardet.chardet_destroy chardet_handle_data = _cchardet.chardet_handle_data chardet_data_end = _cchardet.chardet_data_end chardet_reset = _cchardet.chardet_reset chardet_get_charset = _cchardet.chardet_get_charset
# Lesson 13 (the "for" repetition structure)
for c in range(1, 50):
    if c % 2 == 0:
        print(c, end=' ')
print('Done.')
# -*- coding: utf-8 -*- # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import os import sys import inspect import shutil __location__ = os.path.join(os.getcwd(), os.path.dirname( inspect.getfile(inspect.currentframe()))) # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.join(__location__, '../src')) # -- Run sphinx-apidoc ------------------------------------------------------ # This hack is necessary since RTD does not issue `sphinx-apidoc` before running # `sphinx-build -b html . _build/html`. See Issue: # https://github.com/rtfd/readthedocs.org/issues/1139 # DON'T FORGET: Check the box "Install your project inside a virtualenv using # setup.py install" in the RTD Advanced Settings. # Additionally it helps us to avoid running apidoc manually try: # for Sphinx >= 1.7 from sphinx.ext import apidoc except ImportError: from sphinx import apidoc output_dir = os.path.join(__location__, "api") module_dir = os.path.join(__location__, "../src/mesonet") try: shutil.rmtree(output_dir) except FileNotFoundError: pass try: import sphinx from pkg_resources import parse_version cmd_line_template = "sphinx-apidoc -f -o {outputdir} {moduledir}" cmd_line = cmd_line_template.format(outputdir=output_dir, moduledir=module_dir) args = cmd_line.split(" ") if parse_version(sphinx.__version__) >= parse_version('1.7'): args = args[1:] apidoc.main(args) except Exception as e: print("Running `sphinx-apidoc` failed!\n{}".format(e)) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. # needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.autodoc', 'sphinx.ext.intersphinx', 'sphinx.ext.todo', 'sphinx.ext.autosummary', 'sphinx.ext.viewcode', 'sphinx.ext.coverage', 'sphinx.ext.doctest', 'sphinx.ext.ifconfig', 'sphinx.ext.mathjax', 'sphinx.ext.napoleon'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. # source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Mesonet-DB' copyright = u'2019, R. Kyle Bocinsky' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '' # Is set by calling `setup.py docs` # The full version, including alpha/beta/rc tags. release = '' # Is set by calling `setup.py docs` # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. # language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: # today = '' # Else, today_fmt is used as the format for a strftime call. 
# today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = ['_build'] # The reST default role (used for this markup: `text`) to use for all documents. # default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. # add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). # add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. # show_authors = False # The name of the Pygments (syntax highlighting) style to use. pygments_style = 'sphinx' # A list of ignored prefixes for module index sorting. # modindex_common_prefix = [] # If true, keep warnings as "system message" paragraphs in the built documents. # keep_warnings = False # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'alabaster' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. html_theme_options = { 'sidebar_width': '300px', 'page_width': '1200px' } # Add any paths that contain custom themes here, relative to this directory. # html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". try: from mesonet import __version__ as version except ImportError: pass else: release = version # A shorter title for the navigation bar. Default is the same as html_title. # html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. # html_logo = "" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. # html_favicon = None # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. # html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. # html_use_smartypants = True # Custom sidebar templates, maps document names to template names. # html_sidebars = {} # Additional templates that should be rendered to pages, maps page names to # template names. # html_additional_pages = {} # If false, no module index is generated. # html_domain_indices = True # If false, no index is generated. # html_use_index = True # If true, the index is split into individual pages for each letter. # html_split_index = False # If true, links to the reST sources are added to the pages. # html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. # html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. # html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. 
The value of this option must be the # base URL from which the finished HTML is served. # html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). # html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'mesonet-doc' # -- Options for LaTeX output -------------------------------------------------- latex_elements = { # The paper size ('letterpaper' or 'a4paper'). # 'papersize': 'letterpaper', # The font size ('10pt', '11pt' or '12pt'). # 'pointsize': '10pt', # Additional stuff for the LaTeX preamble. # 'preamble': '', } # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'user_guide.tex', u'Mesonet-DB Documentation', u'R. Kyle Bocinsky', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. # latex_logo = "" # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. # latex_use_parts = False # If true, show page references after internal links. # latex_show_pagerefs = False # If true, show URL addresses after external links. # latex_show_urls = False # Documents to append as an appendix to all manuals. # latex_appendices = [] # If false, no module index is generated. # latex_domain_indices = True # -- External mapping ------------------------------------------------------------ python_version = '.'.join(map(str, sys.version_info[0:2])) intersphinx_mapping = { 'sphinx': ('http://www.sphinx-doc.org/en/stable', None), 'python': ('https://docs.python.org/' + python_version, None), 'matplotlib': ('https://matplotlib.org', None), 'numpy': ('https://docs.scipy.org/doc/numpy', None), 'sklearn': ('http://scikit-learn.org/stable', None), 'pandas': ('http://pandas.pydata.org/pandas-docs/stable', None), 'scipy': ('https://docs.scipy.org/doc/scipy/reference', None), }
# Generated by Django 2.0.9 on 2018-12-28 12:39 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('bucket', '0002_auto_20181228_2135'), ] operations = [ migrations.AlterField( model_name='bucket', name='creator', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='bucket', to=settings.AUTH_USER_MODEL), ), ]
import base64
from typing import Optional


def get_data_from_file(file_path: str) -> Optional[str]:
    """
    Read a file and return its contents base64-encoded as a UTF-8 string.
    Returns None when file_path is empty.
    """
    if file_path:
        with open(file_path, "rb") as stream:
            file_data = stream.read()

        return base64.b64encode(file_data).decode("utf-8")
    return None
""" inflateorg.py Inflate or shrink the membrane to resolve clash between membrane and protein. Handles the primary functions """ import os import shutil from subprocess import call from pkg_resources import resource_filename import MDAnalysis as mda import numpy as np os.environ["GMX_MAXBACKUP"] = "-1" gromacs = '/usr/local/gromacs/2018.8/bin/gmx' mdp = resource_filename(__name__, 'data/minim.mdp') grompp = '{gromacs} grompp -f minim.mdp -c {gro} -p {topol} -o em.tpr -maxwarn {maxwarn} -r {gro}' mdrun = '{gromacs} mdrun -deffnm em' trjconv_pbc = 'echo 0 | {gromacs} trjconv -f em.gro -s em.tpr -o em.gro -pbc mol' trjconv_check = 'echo 0 | {gromacs} trjconv -f pre_inflation.gro -s em.tpr -o em.gro -pbc mol' class InflateORG(): def __init__(self, start_file='start.gro', topol='topol.top', center='protein', mobile='not protein', sep=None, scaling_factor = 0.95, dim = [1,1,0], cutoff=1, maxwarn=0): ''' :param start_file: The coordinate file for the InflateAny program. :param topol: The topology file compatible with the gromacs program :param center: The center of the inflation which is not modified :param mobile: The peripheral which will be expanded and shirked :param sep: Define how to separate the peripheral. :param scaling_factor: The factor of inflation at each :param dim: The dimension of the scaling on x, y and z axis. default is 1,1,1. :param cutoff: Cutoff distance where two particles are considered as separate. ''' self.start_file = start_file self.topol = topol self.center = center self.mobile = mobile self.sep = sep self.scaling_factor = scaling_factor self.dim = np.array(dim) self.cutoff =cutoff self.maxwarn = maxwarn self.sanity_check() self.inflate_system() self.shrink_system() def sanity_check(self): ''' Check if the input is correct. ''' u = mda.Universe(self.start_file) u.select_atoms('({}) or ({})'.format(self.center, self.mobile)).write('pre_inflation.gro') try: call(grompp.format(gromacs=gromacs, gro='pre_inflation.gro', topol=self.topol, maxwarn=self.maxwarn), shell=True) call(trjconv_check.format(gromacs=gromacs), shell=True) shutil.move('em.gro', 'pre_inflation.gro') except: print('Make sure the mdp file (minim.mdp), the topology file ({}) and the input coordinate file ({}) is correct.'.format( self.topol, self.start_file )) os.mkdir('InflateAny') os.remove('em.tpr') shutil.move('pre_inflation.gro', 'InflateAny/pre_inflation.gro') shutil.copy('minim.mdp', 'InflateAny/minim.mdp') os.chdir('InflateAny') self.topol = '../' + self.topol def separate_molecule(self, selection=None, sep=None): ''' Separate the selection into defferent entities. :param u: input universe :return: A list of the atom groups which will be scaled. ''' if self.sep is None: # By default the separtion is based on residue id mobile_parts = u.select_atoms(self.mobile).residues return [residue.atoms for residue in mobile_parts] else: return [u.select_atoms(part) for part in self.sep] def inflate(self, u, scaling_factor): ''' :param u: the input MDAnalysis Universe to which scaling will be applied ''' # dimensions = u.dimensions # center_of_geometry = u.select_atoms(self.center).center_of_geometry() # u.atoms.translate(-center_of_geometry) # # for part in self.separate_mobile(u): # vector = part.center_of_geometry() # part.translate((vector * (scaling_factor - 1))*self.dim) # u.atoms.translate(center_of_geometry * scaling_factor) # dimensions[:3] = dimensions[:3] * scaling_factor # u.dimensions = dimensions # return u def inflate_system(self): ''' Inflate the system. 
:return: ''' u = mda.Universe('pre_inflation.gro') repeat = True count = 0 print('Start inflating the {}'.format(self.mobile)) while repeat: count += 1 u = self.inflate(u, 1 / self.scaling_factor) check = u.select_atoms('{} and around {} ({})'.format(self.mobile, self.cutoff, self.center)) print('Interation {}:'.format(count)) print('Atoms with {}A of {}:'.format(self.cutoff, self.center)) print(check) if len(check) == 0: 'No atom in contact with {}.'.format(self.center) repeat = False print('Begin the shrinking process.') u.atoms.write('inflated.gro') call(grompp.format(gromacs=gromacs, gro='inflated.gro', topol=self.topol, maxwarn=self.maxwarn), shell=True) self.mdrun(gromacs=gromacs) call(trjconv_pbc.format(gromacs=gromacs), shell=True) shutil.copy('em.gro', 'inflated_em.gro') self.count = count def shrink_system(self): for i in range(self.count): print('Interation {}:'.format(i)) u = mda.Universe('em.gro') u = self.inflate(u, self.scaling_factor) u.atoms.write('shrinked_{}.gro'.format(i)) call(grompp.format(gromacs=gromacs, gro='shrinked_{}.gro'.format(i), topol=self.topol, maxwarn=self.maxwarn), shell=True) self.mdrun(gromacs=gromacs) call(trjconv_pbc.format(gromacs=gromacs), shell=True) shutil.copy2('em.gro', 'equilibrated.gro') shutil.copy2('em.gro', '../equilibrated.gro') os.chdir('../') def mdrun(self, gromacs=gromacs, additional=''): # Try to get around the situation where opencl won't start repeat = True while repeat: returncode = call(mdrun.format(gromacs=gromacs) + ' ' + additional, shell=True) if returncode == 0: repeat = False
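# A minimal usage sketch, assuming a GROMACS-ready membrane-protein system;
# 'start.gro' and 'topol.top' are placeholder file names, not shipped inputs.
# The constructor runs the whole inflate / minimize / shrink cycle itself.
if __name__ == '__main__':
    InflateORG(
        start_file='start.gro',   # coordinates with clashes to resolve
        topol='topol.top',        # matching GROMACS topology
        center='protein',         # part that is kept fixed
        mobile='not protein',     # part scaled away from, then back toward, the center
        scaling_factor=0.95,      # 5% size change per iteration
        dim=[1, 1, 0],            # scale only in the membrane (xy) plane
    )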
n = int(input()) for i in range(1, n + 1): tsum = sum(map(int, input().split())) print("Case #{}: {}".format(i, tsum))