text stringlengths 38 1.54M |
|---|
import pandas as pd

# Build label <-> column-index mappings from the training labels, then re-save
# the headerless checkpoint predictions CSV with proper column names.
df = pd.read_csv('train/images.csv')

# Order matters: column n of the checkpoint file corresponds to the n-th
# distinct label in the training CSV.
labels = df['label'].unique()
num_classes = len(labels)
numbers_to_classes = {n: plankton_cat for n, plankton_cat in enumerate(labels)}
classes_to_numbers = {plankton_cat: n for n, plankton_cat in enumerate(labels)}

# FIX: renamed from `list`, which shadowed the builtin; also built directly
# instead of looping over numbers_to_classes.values()
column_names = ['image'] + [numbers_to_classes[n] for n in range(num_classes)]

df2 = pd.read_csv('checkpoint0.csv', names=column_names)
df2.to_csv('checkpoint0_mod.csv', index=False)
import sqlite3
from datetime import date
def conectar_banco(nome_bd):
    """Open a SQLite database and return the connection object."""
    conexao = sqlite3.connect(nome_bd)
    return conexao
def criar_cursor(conexao):
    """Create and return a new cursor for the given connection."""
    cursor = conexao.cursor()
    return cursor
def criar_tabela_computador(cursor):
    """Create the `computador` table if it does not already exist.

    BUG FIX: the last column was written as ' marca, VARCHAR' - the misplaced
    comma declared two untyped columns named 'marca' and 'VARCHAR' instead of
    a single 'marca VARCHAR' column.
    """
    comando = (
        'CREATE TABLE IF NOT EXISTS'
        ' computador ('
        ' codigo INTEGER PRIMARY KEY,'
        ' nome VARCHAR,'
        ' aquisicao DATE,'
        ' vida INTEGER,'
        ' marca VARCHAR'
        ' )'
    )
    executar_comando(cursor, comando)
def executar_comando(cursor, comando, parametros=None):
    """Execute *comando* on *cursor*, binding *parametros* when provided.

    Returns the cursor so calls can be chained (e.g. ``.fetchone()``).
    """
    if not parametros:
        cursor.execute(comando)
    else:
        cursor.execute(comando, parametros)
    return cursor
# MAIN
# Create the database in memory: once the program ends, the DB ceases to exist.
conexao = conectar_banco(':memory:')
print(type(conexao))  # <class 'sqlite3.Connection'>
cursor = criar_cursor(conexao)
print(type(cursor))  # <class 'sqlite3.Cursor'>
criar_tabela_computador(cursor)
|
from matplotlib import pyplot as plt
from sklearn.metrics import precision_recall_curve, roc_curve
def evaluating_models(y_test, y_predicted, name, n_classes=4):
    """Plot per-class precision-recall and ROC curves for a model.

    Args:
        y_test: binarized ground-truth labels, indexed as y_test[:, i] per class.
        y_predicted: predicted scores, same column layout as y_test.
        name: model name, used in the plot titles.
        n_classes: number of class columns to plot (default 4, matching the
            previously hard-coded value, so existing callers are unaffected).
    """
    def _plot_per_class(curve_fn, swap_axes, xlabel, ylabel, title):
        # Shared plotting loop for both curve types; curve_fn returns
        # (a, b, thresholds) and swap_axes selects which goes on the x axis.
        for i in range(n_classes):
            a, b, _ = curve_fn(y_test[:, i], y_predicted[:, i])
            x, y = (b, a) if swap_axes else (a, b)
            plt.plot(x, y, lw=2, label='class {}'.format(i))
        plt.xlabel(xlabel)
        plt.ylabel(ylabel)
        plt.legend(loc="best")
        plt.title(title)
        plt.show()

    # precision-recall curve (recall on x, precision on y)
    _plot_per_class(precision_recall_curve, True, "recall", "precision",
                    "precision vs. recall curve for " + name)
    # ROC curve (fpr on x, tpr on y)
    _plot_per_class(roc_curve, False, "false positive rate", "true positive rate",
                    "ROC curve for " + name)
#
# Copyright 2017-2019 B-Open Solutions srl.
# Copyright 2017-2019 European Centre for Medium-Range Weather Forecasts (ECMWF).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import keyword
import os
import pkgutil
import signal
import tempfile
import builtins
from enum import Enum
import cffi
import numpy as np
def string_from_ffi(s):
    """Decode a C string obtained through the ffi layer into a Python str."""
    raw = ffi.string(s)
    return raw.decode('utf-8')
# -----------------------------------------------------------------------------
# Startup
# -----------------------------------------------------------------------------
class MetviewInvoker:
    """Starts a new Metview session on construction and terminates it on program exit"""

    def __init__(self):
        """
        Constructor - starts a Metview session and reads its environment information
        Raises an exception if Metview does not respond within 5 seconds
        """
        self.debug = (os.environ.get("METVIEW_PYTHON_DEBUG", '0') == '1')
        # check whether we're in a running Metview session
        if 'METVIEW_TITLE_PROD' in os.environ:
            self.persistent_session = True
            self.info_section = {'METVIEW_LIB': os.environ['METVIEW_LIB']}
            return
        # local imports: only needed when we actually have to spawn Metview
        import atexit
        import time
        import subprocess
        if self.debug:
            print('MetviewInvoker: Invoking Metview')
        self.persistent_session = False
        self.metview_replied = False
        self.metview_startup_timeout = 5  # seconds
        # start Metview with command-line parameters that will let it communicate back to us
        env_file = tempfile.NamedTemporaryFile(mode='rt')
        pid = os.getpid()
        # print('PYTHON:', pid, ' ', env_file.name, ' ', repr(signal.SIGUSR1))
        # Metview signals readiness by sending SIGUSR1 back to this process
        signal.signal(signal.SIGUSR1, self.signal_from_metview)
        # p = subprocess.Popen(['metview', '-edbg', 'tv8 -a', '-slog', '-python-serve',
        #                       env_file.name, str(pid)], stdout=subprocess.PIPE)
        metview_startup_cmd = os.environ.get("METVIEW_PYTHON_START_CMD", 'metview')
        metview_flags = [metview_startup_cmd, '-nocreatehome', '-python-serve',
                         env_file.name, str(pid)]
        if self.debug:
            metview_flags.insert(2, '-slog')
            print('Starting Metview using these command args:')
            print(metview_flags)
        try:
            subprocess.Popen(metview_flags)
        except Exception as exp:
            print("Could not run the Metview executable ('" + metview_startup_cmd + "'); "
                  "check that the binaries for Metview (version 5 at least) are installed "
                  "and are in the PATH.")
            raise exp
        # wait for Metview to respond... (poll until the SIGUSR1 handler fires
        # or the timeout elapses)
        wait_start = time.time()
        while (not(self.metview_replied) and
               (time.time() - wait_start < self.metview_startup_timeout)):
            time.sleep(0.001)
        if not(self.metview_replied):
            raise Exception('Command "metview" did not respond within '
                            + str(self.metview_startup_timeout) + ' seconds. '
                            'At least Metview 5 is required, so please ensure it is in your PATH, '
                            'as earlier versions will not work with the Python interface.')
        self.read_metview_settings(env_file.name)
        # when the Python session terminates, we should destroy this object so that the Metview
        # session is properly cleaned up. We can also do this in a __del__ function, but there can
        # be problems with the order of cleanup - e.g. the 'os' module might be deleted before
        # this destructor is called.
        atexit.register(self.destroy)

    def destroy(self):
        """Kills the Metview session. Raises an exception if it could not do it."""
        if self.persistent_session:
            return
        if self.metview_replied:
            if self.debug:
                print('MetviewInvoker: Closing Metview')
            metview_pid = self.info('EVENT_PID')
            try:
                # SIGUSR1 is the agreed shutdown signal for the spawned session
                os.kill(int(metview_pid), signal.SIGUSR1)
            except Exception as exp:
                print("Could not terminate the Metview process pid=" + metview_pid)
                raise exp

    def signal_from_metview(self, *args):
        """Called when Metview sends a signal back to Python to say that it's started"""
        # print ('PYTHON: GOT SIGNAL BACK FROM METVIEW!')
        self.metview_replied = True

    def read_metview_settings(self, settings_file):
        """Parses the settings file generated by Metview and sets the corresponding env vars"""
        import configparser
        cf = configparser.ConfigParser()
        cf.read(settings_file)
        env_section = cf['Environment']
        for envar in env_section:
            # print('set ', envar.upper(), ' = ', env_section[envar])
            os.environ[envar.upper()] = env_section[envar]
        self.info_section = cf['Info']

    def info(self, key):
        """Returns a piece of Metview information that was not set as an env var"""
        return self.info_section[key]

    def store_signal_handlers(self):
        """Stores the set of signal handlers that Metview will override"""
        self.sigint = signal.getsignal(signal.SIGINT)
        self.sighup = signal.getsignal(signal.SIGHUP)
        self.sighquit = signal.getsignal(signal.SIGQUIT)
        self.sigterm = signal.getsignal(signal.SIGTERM)
        self.sigalarm = signal.getsignal(signal.SIGALRM)

    def restore_signal_handlers(self):
        """Restores the set of signal handlers that Metview has overridden"""
        signal.signal(signal.SIGINT, self.sigint)
        signal.signal(signal.SIGHUP, self.sighup)
        signal.signal(signal.SIGQUIT, self.sighquit)
        signal.signal(signal.SIGTERM, self.sigterm)
        signal.signal(signal.SIGALRM, self.sigalarm)
# Module import side effect: start (or attach to) a Metview session, load the
# libMvMacro shared library through cffi, and initialise the Macro interpreter.
mi = MetviewInvoker()
try:
    ffi = cffi.FFI()
    # the C declarations for the Macro library ship as metview.h inside the package
    ffi.cdef(pkgutil.get_data('metview', 'metview.h').decode('ascii'))
    mv_lib = mi.info('METVIEW_LIB')
    # is there a more general way to add to a path to a list of paths?
    os.environ["LD_LIBRARY_PATH"] = mv_lib + ':' + os.environ.get("LD_LIBRARY_PATH", '')
    try:
        # Linux / Unix systems
        lib = ffi.dlopen(os.path.join(mv_lib, 'libMvMacro.so'))
    except OSError:
        # MacOS systems
        lib = ffi.dlopen(os.path.join(mv_lib, 'libMvMacro'))
except Exception as exp:
    print('Error loading Metview/libMvMacro. LD_LIBRARY_PATH='
          + os.environ.get("LD_LIBRARY_PATH", ''))
    raise exp
# The C/C++ code behind lib.p_init() will call marsinit(), which overrides various signal
# handlers. We don't necessarily want this when running a Python script - we should use
# the default Python behaviour for handling signals, so we save the current signals
# before calling p_init() and restore them after.
mi.store_signal_handlers()
lib.p_init()
mi.restore_signal_handlers()
# -----------------------------------------------------------------------------
# Classes to handle complex Macro types
# -----------------------------------------------------------------------------
class Value:
    """Base wrapper around an opaque pointer to a value held by the Macro library."""

    def __init__(self, val_pointer):
        # opaque Macro-side pointer; None means "no value" (pushed as nil)
        self.val_pointer = val_pointer

    def push(self):
        """Push this value (or nil if we hold no pointer) onto the Macro stack."""
        if self.val_pointer is None:
            lib.p_push_nil()
        else:
            lib.p_push_value(self.val_pointer)

    # if we steal a value pointer from a temporary Value object, we need to
    # ensure that the Metview Value is not destroyed when the temporary object
    # is destroyed by setting its pointer to None
    def steal_val_pointer(self, other):
        self.val_pointer = other.val_pointer
        other.val_pointer = None

    # enable a more object-oriented interface, e.g. a = fs.interpolate(10, 29.4)
    # (any unknown attribute becomes a Macro function call with self as first arg)
    def __getattr__(self, fname):
        def call_func_with_self(*args, **kwargs):
            return call(fname, self, *args, **kwargs)
        return call_func_with_self

    # on destruction, ensure that the Macro Value is also destroyed
    def __del__(self):
        try:
            # `lib is not None` guards against interpreter-shutdown ordering
            if self.val_pointer is not None and lib is not None:
                lib.p_destroy_value(self.val_pointer)
                self.val_pointer = None
        except Exception as exp:
            print("Could not destroy Metview variable ", self)
            raise exp
class Request(dict, Value):
    """A Metview request: a verb plus named parameters, usable as a dict.

    Can be built either from a Python dict/Request or from a Macro-side
    request pointer.
    """
    verb = "UNKNOWN"

    def __init__(self, req):
        self.val_pointer = None
        # initialise from Python object (dict/Request)
        if isinstance(req, dict):
            self.update(req)
            self.to_metview_style()
            if isinstance(req, Request):
                self.verb = req.verb
                self.val_pointer = req.val_pointer
        # initialise from a Macro pointer
        else:
            Value.__init__(self, req)
            self.verb = string_from_ffi(lib.p_get_req_verb(req))
            n = lib.p_get_req_num_params(req)
            for i in range(0, n):
                param = string_from_ffi(lib.p_get_req_param(req, i))
                raw_val = lib.p_get_req_value(req, param.encode('utf-8'))
                if raw_val != ffi.NULL:
                    val = string_from_ffi(raw_val)
                    self[param] = val
            # self['_MACRO'] = 'BLANK'
            # self['_PATH'] = 'BLANK'

    def __str__(self):
        return "VERB: " + self.verb + super().__str__()

    # translate Python classes into Metview ones where needed
    def to_metview_style(self):
        # BUG FIX: iterate over a snapshot of the items - the 'class_' branch
        # deletes a key, and mutating a dict while iterating over its live
        # items() view raises RuntimeError in Python 3
        for k, v in list(self.items()):
            # bool -> on/off
            if isinstance(v, bool):
                conversion_dict = {True: 'on', False: 'off'}
                self[k] = conversion_dict[v]
            # class_ -> class (because 'class' is a Python keyword and cannot be
            # used as a named parameter)
            elif k == 'class_':
                self['class'] = v
                del self['class_']

    def push(self):
        # if we have a pointer to a Metview Value, then use that because it's more
        # complete than the dict
        if self.val_pointer:
            Value.push(self)
        else:
            r = lib.p_new_request(self.verb.encode('utf-8'))
            # to populate a request on the Macro side, we push each
            # value onto its stack, and then tell it to create a new
            # parameter with that name for the request. This allows us to
            # use Macro to handle the addition of complex data types to
            # a request
            for k, v in self.items():
                push_arg(v)
                lib.p_set_request_value_from_pop(r, k.encode('utf-8'))
            lib.p_push_request(r)

    def __getitem__(self, index):
        # delegate subscripting to the Macro '[]' operator
        return subset(self, index)
def push_bytes(b):
    """Push a bytes object onto the Macro stack as a string."""
    lib.p_push_string(b)
def push_str(s):
    """Push a Python str onto the Macro stack (UTF-8 encoded)."""
    push_bytes(s.encode('utf-8'))
def push_list(lst):
    """Push a Python list/tuple onto the Macro stack as a Metview list."""
    # ask Metview to create a new list, then add each element by
    # pushing it onto the stack and asking Metview to pop it off
    # and add it to the list
    mlist = lib.p_new_list(len(lst))
    for i, val in enumerate(lst):
        push_arg(val)
        lib.p_add_value_from_pop_to_list(mlist, i)
    lib.p_push_list(mlist)
def push_date(d):
    """Push a numpy datetime64 onto the Macro stack as an ISO date string."""
    lib.p_push_datestring(np.datetime_as_string(d).encode('utf-8'))
def push_datetime(d):
    """Push a datetime.datetime onto the Macro stack as an ISO date string."""
    lib.p_push_datestring(d.isoformat().encode('utf-8'))
def push_datetime_date(d):
    """Push a datetime.date onto the Macro stack, using midnight as the time part."""
    s = d.isoformat() + 'T00:00:00'
    lib.p_push_datestring(s.encode('utf-8'))
def push_vector(npa):
    """Push a numpy float array onto the Macro stack as a Metview vector.

    Only float64 and float32 dtypes are supported; np.nan is passed as the
    third argument to the C call - presumably the missing-value marker
    (TODO confirm against libMvMacro).
    """
    # convert numpy array to CData
    if npa.dtype == np.float64:
        cffi_buffer = ffi.cast('double*', npa.ctypes.data)
        lib.p_push_vector_from_double_array(cffi_buffer, len(npa), np.nan)
    elif npa.dtype == np.float32:
        cffi_buffer = ffi.cast('float*', npa.ctypes.data)
        lib.p_push_vector_from_float32_array(cffi_buffer, len(npa), np.nan)
    else:
        raise Exception('Only float32 and float64 numPy arrays can be passed to Metview, not ',
                        npa.dtype)
class FileBackedValue(Value):
    """A Metview value whose data is backed by a file on disk."""

    def __init__(self, val_pointer):
        Value.__init__(self, val_pointer)

    def url(self):
        """Return the path of the file backing this value."""
        # ask Metview for the file relating to this data (Metview will write it if necessary)
        return string_from_ffi(lib.p_data_path(self.val_pointer))
class FileBackedValueWithOperators(FileBackedValue):
    """A file-backed value supporting arithmetic/comparison operators.

    Each dunder delegates to the corresponding module-level Macro binding
    (add, sub, prod, ... defined near the end of the module), so the
    operation is performed by Metview, not in Python.
    """

    def __init__(self, val_pointer):
        FileBackedValue.__init__(self, val_pointer)

    def __add__(self, other):
        return add(self, other)

    def __sub__(self, other):
        return sub(self, other)

    def __mul__(self, other):
        return prod(self, other)

    def __truediv__(self, other):
        return div(self, other)

    def __pow__(self, other):
        return power(self, other)

    def __ge__(self, other):
        return greater_equal_than(self, other)

    def __gt__(self, other):
        return greater_than(self, other)

    def __le__(self, other):
        return lower_equal_than(self, other)

    def __lt__(self, other):
        return lower_than(self, other)

    def __eq__(self, other):
        return equal(self, other)

    def __ne__(self, other):
        return met_not_eq(self, other)
class ContainerValue(Value):
    """Base class for Metview values that contain indexable elements."""

    def __init__(self, val_pointer, macro_index_base, element_type, support_slicing):
        Value.__init__(self, val_pointer)
        self.idx = 0  # iteration cursor used by __iter__/__next__
        self.macro_index_base = macro_index_base  # Macro-side first index (0 or 1)
        self.element_type = element_type  # the type of elements that the container contains
        self.support_slicing = support_slicing

    def __len__(self):
        if self.val_pointer is None:
            return 0
        else:
            # `count` is the Macro count() function bound at module level
            return int(count(self))

    def __getitem__(self, index):
        if isinstance(index, slice):
            if self.support_slicing:
                indices = index.indices(len(self))
                fields = [self[i] for i in range(*indices)]
                if len(fields) == 0:
                    return None
                else:
                    # merge all selected elements into a single value
                    f = fields[0]
                    for i in range(1, len(fields)):
                        f = merge(f, fields[i])
                    return f
            else:
                raise Exception('This object does not support extended slicing: ' + str(self))
        else:  # normal index
            if isinstance(index, str):  # can have a string as an index
                return subset(self, index)
            else:
                return subset(self, index + self.macro_index_base)  # numeric index: 0->1-based

    def __setitem__(self, index, value):
        if (isinstance(value, self.element_type)):
            lib.p_set_subvalue(self.val_pointer, index + self.macro_index_base, value.val_pointer)
        else:
            raise Exception('Cannot assign ', value, ' as element of ', self)

    def __iter__(self):
        # NOTE(review): the iterator state lives on the container itself,
        # so two simultaneous iterations over the same object would interfere
        return self

    def __next__(self):
        if self.idx >= self.__len__():
            self.idx = 0
            raise StopIteration
        else:
            self.idx += 1
            return self.__getitem__(self.idx - 1)
class Fieldset(FileBackedValueWithOperators, ContainerValue):
    """A GRIB fieldset: a sliceable container of fields with 1-based Macro indexing."""

    def __init__(self, val_pointer=None, path=None):
        # NOTE(review): calls FileBackedValue.__init__ directly rather than
        # FileBackedValueWithOperators.__init__; equivalent in effect since
        # the intermediate __init__ only forwards the pointer
        FileBackedValue.__init__(self, val_pointer)
        ContainerValue.__init__(self, val_pointer, 1, Fieldset, True)
        if path is not None:
            # load from a GRIB file and adopt the resulting value pointer
            temp = read(path)
            self.steal_val_pointer(temp)

    def append(self, other):
        # merge creates a new fieldset; adopt its pointer as our own
        temp = merge(self, other)
        self.steal_val_pointer(temp)

    def to_dataset(self):
        """Convert this fieldset to an xarray Dataset via cfgrib."""
        # soft dependency on cfgrib
        try:
            from cfgrib import xarray_store
        except ImportError:
            print("Package cfgrib/xarray_store not found. Try running 'pip install cfgrib'.")
            raise
        dataset = xarray_store.open_dataset(self.url())
        return dataset
class Bufr(FileBackedValue):
    """A BUFR observation value, backed by a file on disk."""

    def __init__(self, val_pointer):
        FileBackedValue.__init__(self, val_pointer)
class Geopoints(FileBackedValueWithOperators, ContainerValue):
    """A geopoints value: point data with 0-based indexing, no slicing support."""

    def __init__(self, val_pointer):
        FileBackedValueWithOperators.__init__(self, val_pointer)
        ContainerValue.__init__(self, val_pointer, 0, None, False)

    def to_dataframe(self):
        """Return the geopoints columns as a pandas DataFrame (soft dependency)."""
        try:
            import pandas as pd
        except ImportError:
            print("Package pandas not found. Try running 'pip install pandas'.")
            raise
        # create a dictionary of columns (note that we do not include 'time'
        # because it is incorporated into 'date')
        cols = self.columns()
        if 'time' in cols:
            cols.remove('time')
        pddict = {}
        for c in cols:
            pddict[c] = self[c]
        df = pd.DataFrame(pddict)
        return df
class NetCDF(FileBackedValueWithOperators):
    """A NetCDF value, backed by a file on disk."""

    def __init__(self, val_pointer):
        FileBackedValueWithOperators.__init__(self, val_pointer)

    def to_dataset(self):
        """Open the backing NetCDF file as an xarray Dataset."""
        # soft dependency on xarray
        try:
            import xarray as xr
        except ImportError:
            print("Package xarray not found. Try running 'pip install xarray'.")
            raise
        dataset = xr.open_dataset(self.url())
        return dataset
class Odb(FileBackedValue):
    """An ODB database value, backed by a file on disk."""

    def __init__(self, val_pointer):
        FileBackedValue.__init__(self, val_pointer)

    def to_dataframe(self):
        """Return the ODB columns as a pandas DataFrame (soft dependency)."""
        try:
            import pandas as pd
        except ImportError:
            print("Package pandas not found. Try running 'pip install pandas'.")
            raise
        # one DataFrame column per ODB column, preserving column order
        return pd.DataFrame({col: self.values(col) for col in self.columns()})
class Table(FileBackedValue):
    """A table (CSV-like) value, backed by a file on disk."""

    def __init__(self, val_pointer):
        FileBackedValue.__init__(self, val_pointer)

    def to_dataframe(self):
        """Read the backing file into a pandas DataFrame (soft dependency)."""
        try:
            import pandas as pd
        except ImportError:
            print("Package pandas not found. Try running 'pip install pandas'.")
            raise
        return pd.read_csv(self.url())
class GeopointSet(FileBackedValueWithOperators, ContainerValue):
    """A set of Geopoints values with 1-based Macro indexing, no slicing support."""

    def __init__(self, val_pointer):
        FileBackedValueWithOperators.__init__(self, val_pointer)
        ContainerValue.__init__(self, val_pointer, 1, Geopoints, False)
# -----------------------------------------------------------------------------
# Pushing data types to Macro
# -----------------------------------------------------------------------------
def dataset_to_fieldset(val, **kwarg):
    """Convert an xarray Dataset into a Metview Fieldset.

    Writes the dataset to a temporary GRIB file with cfgrib, then reads it
    back as a Fieldset. Extra keyword args are forwarded to cfgrib.to_grib.

    Raises:
        TypeError: if *val* is not an xarray Dataset.
    """
    # we try to import xarray as locally as possible to reduce startup time
    # try to write the xarray as a GRIB file, then read into a fieldset
    import xarray as xr
    import cfgrib
    if not isinstance(val, xr.core.dataset.Dataset):
        raise TypeError('dataset_to_fieldset requires a variable of type xr.core.dataset.Dataset;'
                        ' was supplied with ', builtins.type(val))
    f, tmp = tempfile.mkstemp(".grib")
    os.close(f)
    try:
        # could add keys, e.g. grib_keys={'centre': 'ecmf'})
        cfgrib.to_grib(val, tmp, **kwarg)
    except Exception:
        # FIX: narrowed from a bare `except:` so that e.g. KeyboardInterrupt
        # is not intercepted; the original error is still re-raised
        print("Error trying to write xarray dataset to GRIB for conversion to Metview Fieldset")
        raise
    # TODO: tell Metview that this is a temporary file that should be deleted when no longer needed
    fs = read(tmp)
    return fs
def push_xarray_dataset(val):
    """Convert *val* to a Fieldset (via GRIB) and push it onto the Macro stack."""
    fs = dataset_to_fieldset(val)
    fs.push()
# try_to_push_complex_type exists as a separate function so that we don't have
# to import xarray at the top of the module - this saves some time on startup
def try_to_push_complex_type(val):
    """Fallback pusher for types that need heavyweight imports (currently xarray).

    Raises TypeError if *val* is not a supported type.
    """
    import xarray as xr
    if isinstance(val, xr.core.dataset.Dataset):
        push_xarray_dataset(val)
    else:
        raise TypeError('Cannot push this type of argument to Metview: ', builtins.type(val))
class ValuePusher():
    """Class to handle pushing values to the Macro library"""

    def __init__(self):
        # a set of pairs linking value types with functions to push them to Macro
        # note that Request must come before dict, because a Request inherits from dict;
        # this ordering requirement also means we should use list or tuple instead of a dict
        self.funcs = (
            (float, lambda n: lib.p_push_number(n)),
            ((int, np.number), lambda n: lib.p_push_number(float(n))),
            (str, lambda n: push_str(n)),
            (Request, lambda n: n.push()),
            (dict, lambda n: Request(n).push()),
            ((list, tuple), lambda n: push_list(n)),
            (type(None), lambda n: lib.p_push_nil()),
            (FileBackedValue, lambda n: n.push()),
            (np.datetime64, lambda n: push_date(n)),
            (datetime.datetime, lambda n: push_datetime(n)),
            (datetime.date, lambda n: push_datetime_date(n)),
            (np.ndarray, lambda n: push_vector(n)),
        )

    def push_value(self, val):
        """Push *val* using the first matching type handler; returns 1 (one stack item)."""
        for typekey, typefunc in self.funcs:
            if isinstance(val, typekey):
                typefunc(val)
                return 1
        # if we haven't returned yet, then try the more complex types
        try_to_push_complex_type(val)
        return 1
# module-level singleton used by push_arg
vp = ValuePusher()


def push_arg(n):
    """Push one argument onto the Macro stack; returns the number of items pushed."""
    return vp.push_value(n)
def dict_to_pushed_args(d):
    """Push every key/value of *d* onto the Macro stack; return the args count."""
    # push each key and value onto the argument stack
    for k, v in d.items():
        push_str(k)
        push_arg(v)
    return 2 * len(d)  # return the number of arguments generated
# -----------------------------------------------------------------------------
# Returning data types from Macro
# -----------------------------------------------------------------------------
def list_from_metview(val):
    """Convert a Macro list into a Python list.

    If every element converts to a numpy array, the result is stacked into
    a single 2-D numpy array instead.
    """
    mlist = lib.p_value_as_list(val)
    result = []
    n = lib.p_list_count(mlist)
    all_vectors = True
    for i in range(0, n):
        mval = lib.p_list_element_as_value(mlist, i)
        v = value_from_metview(mval)
        if all_vectors and not isinstance(v, np.ndarray):
            all_vectors = False
        result.append(v)
    # if this is a list of vectors, then create a 2-D numPy array
    if all_vectors and n > 0:
        result = np.stack(result, axis=0)
    return result
def datestring_from_metview(val):
    """Convert a Macro date value into a Python datetime."""
    iso_string = string_from_ffi(lib.p_value_as_datestring(val))
    return datetime.datetime.strptime(iso_string, "%Y-%m-%dT%H:%M:%S")
def vector_from_metview(val):
    """Convert a Macro vector into a numpy array (float32 or float64)."""
    vec = lib.p_value_as_vector(val, np.nan)
    n = lib.p_vector_count(vec)
    s = lib.p_vector_elem_size(vec)  # element size in bytes
    if s == 4:
        nptype = np.float32
        b = lib.p_vector_float32_array(vec)
    elif s == 8:
        nptype = np.float64
        b = lib.p_vector_double_array(vec)
    else:
        raise Exception('Metview vector data type cannot be handled: ', s)
    bsize = n * s
    # wrap the C buffer without copying; NOTE(review): the array shares
    # memory with the Macro-side vector - confirm its lifetime outlives use
    c_buffer = ffi.buffer(b, bsize)
    np_array = np.frombuffer(c_buffer, dtype=nptype)
    return np_array
def handle_error(val):
    """Translate a Macro error value into an Exception.

    Returns None instead for the Service/Examiner message, which is
    treated as benign.
    """
    msg = string_from_ffi(lib.p_error_message(val))
    if "Service" in msg and "Examiner" in msg:
        return None
    return Exception('Metview error: ' + (msg))
def string_from_metview(val):
    """Convert a Macro string value into a Python str."""
    return string_from_ffi(lib.p_value_as_string(val))
class MvRetVal(Enum):
    """Type codes for values returned by the Macro library (cf. lib.p_value_type)."""
    tnumber = 0
    tstring = 1
    tgrib = 2
    trequest = 3
    tbufr = 4
    tgeopts = 5
    tlist = 6
    tnetcdf = 7
    tnil = 8
    terror = 9
    tdate = 10
    tvector = 11
    todb = 12
    ttable = 13
    tgptset = 14
    tunknown = 99
class ValueReturner():
    """Class to handle return values from the Macro library"""

    def __init__(self):
        # map each Macro type code to a constructor for its Python-side value
        self.funcs = {
            MvRetVal.tnumber.value: lambda val: lib.p_value_as_number(val),
            MvRetVal.tstring.value: lambda val: string_from_metview(val),
            MvRetVal.tgrib.value: lambda val: Fieldset(val),
            MvRetVal.trequest.value: lambda val: Request(val),
            MvRetVal.tbufr.value: lambda val: Bufr(val),
            MvRetVal.tgeopts.value: lambda val: Geopoints(val),
            MvRetVal.tlist.value: lambda val: list_from_metview(val),
            MvRetVal.tnetcdf.value: lambda val: NetCDF(val),
            MvRetVal.tnil.value: lambda val: None,
            MvRetVal.terror.value: lambda val: handle_error(val),
            MvRetVal.tdate.value: lambda val: datestring_from_metview(val),
            MvRetVal.tvector.value: lambda val: vector_from_metview(val),
            MvRetVal.todb.value: lambda val: Odb(val),
            MvRetVal.ttable.value: lambda val: Table(val),
            MvRetVal.tgptset.value: lambda val: GeopointSet(val),
        }

    def translate_return_val(self, val):
        """Convert the Macro value *val* into its Python representation."""
        rt = lib.p_value_type(val)
        try:
            return self.funcs[rt](val)
        except Exception:
            raise Exception('value_from_metview got an unhandled return type: ' + str(rt))
# module-level singleton used by value_from_metview
vr = ValueReturner()


def value_from_metview(val):
    """Convert a Macro return value to Python, raising if it is an error value."""
    retval = vr.translate_return_val(val)
    if isinstance(retval, Exception):
        raise retval
    return retval
# -----------------------------------------------------------------------------
# Creating and calling Macro functions
# -----------------------------------------------------------------------------
def _call_function(mfname, *args, **kwargs):
    """Push all positional and keyword arguments, then invoke Macro function *mfname*."""
    nargs = 0
    for n in args:
        # push_arg returns how many stack items the argument occupied
        actual_n_args = push_arg(n)
        nargs += actual_n_args
    merged_dict = {}
    merged_dict.update(kwargs)
    if len(merged_dict) > 0:
        # keyword args are converted via Request (Metview-style normalisation)
        dn = dict_to_pushed_args(Request(merged_dict))
        nargs += dn
    lib.p_call_function(mfname.encode('utf-8'), nargs)
def make(mfname):
    """Return a Python wrapper that calls the Metview Macro function *mfname*."""
    def wrapped(*args, **kwargs):
        # NOTE(review): _call_function has no return statement, so err is
        # always None and the check below never fires - confirm intent
        err = _call_function(mfname, *args, **kwargs)
        if err:
            pass  # throw Exceception
        val = lib.p_result_as_value()
        return value_from_metview(val)
    return wrapped
def bind_functions(namespace, module_name=None):
    """Add to the module globals all metview functions except operators like: +, &, etc."""
    # the Macro 'dictionary' function lists all callable Macro functions
    for metview_name in make('dictionary')():
        if metview_name.isidentifier():
            python_name = metview_name
            # NOTE: we append a '_' to metview functions that clash with python reserved keywords
            # as they cannot be used as identifiers, for example: 'in' -> 'in_'
            if keyword.iskeyword(metview_name):
                python_name += '_'
            python_func = make(metview_name)
            python_func.__name__ = python_name
            python_func.__qualname__ = python_name
            if module_name:
                python_func.__module__ = module_name
            namespace[python_name] = python_func
        # else:
        #     print('metview function %r not bound to python' % metview_name)
    # add the 'mvl' functions, which are written in Macro and therefore not
    # listed by the dictionary() function
    for f in ['mvl_ml2hPa', 'mvl_create_netcdf_2d', 'mvl_flextra_etadot', 'mvl_geocircle',
              'mvl_geoline', 'mvl_geopotential_on_ml', 'mvl_mxn_subframes', 'mvl_plot_scm_data',
              'mvl_regular_layout', 'mvl_regular_layout_area', 'thermo_data_info',
              'thermo_parcel_path', 'thermo_parcel_area', 'xy_curve', 'potential_temperature',
              'temperature_from_potential_temperature', 'saturation_mixing_ratio', 'mixing_ratio',
              'vapour_pressure', 'saturation_vapour_pressure',
              'lifted_condensation_level', 'divergence', 'vorticity', 'laplacian',
              'geostrophic_wind_pl', 'geostrophic_wind_ml']:
        namespace[f] = make(f)
    # HACK: some functions are missing from the 'dictionary' call.
    namespace['neg'] = make('neg')
    namespace['nil'] = make('nil')
    # override some functions that need special treatment
    # FIXME: this needs to be more structured
    namespace['plot'] = plot
    namespace['setoutput'] = setoutput
    namespace['dataset_to_fieldset'] = dataset_to_fieldset
    namespace['Fieldset'] = Fieldset
# some explicit bindings are used here
# some explicit bindings are used here
# (operators and helpers used internally by the classes above)
add = make('+')
call = make('call')
count = make('count')
div = make('/')
equal = make('=')
filter = make('filter')  # NOTE: shadows the builtin `filter` at module level
greater_equal_than = make('>=')
greater_than = make('>')
lower_equal_than = make('<=')
lower_than = make('<')
merge = make('&')
met_not_eq = make('<>')
met_plot = make('plot')
nil = make('nil')
png_output = make('png_output')
power = make('^')
prod = make('*')
ps_output = make('ps_output')
read = make('read')
met_setoutput = make('setoutput')
sub = make('-')
subset = make('[]')
# -----------------------------------------------------------------------------
# Particular code for calling the plot() command
# -----------------------------------------------------------------------------
class Plot():
    """Callable wrapper around the Macro plot command; can render inline in Jupyter."""

    def __init__(self):
        # set to True by setoutput('jupyter')
        self.plot_to_jupyter = False

    def __call__(self, *args, **kwargs):
        if self.plot_to_jupyter:
            # render to a temporary PNG and return it as an IPython Image;
            # `Image` is injected into module globals by setoutput('jupyter')
            f, tmp = tempfile.mkstemp(".png")
            os.close(f)
            base, ext = os.path.splitext(tmp)
            met_setoutput(png_output(output_name=base, output_name_first_page_number='off'))
            met_plot(*args)
            image = Image(tmp)
            os.unlink(tmp)
            return image
        else:
            map_outputs = {
                'png': png_output,
                'ps': ps_output,
            }
            if 'output_type' in kwargs:
                output_function = map_outputs[kwargs['output_type'].lower()]
                kwargs.pop('output_type')
                met_plot(output_function(kwargs), *args)
            else:
                met_plot(*args)
            # the Macro plot command returns an empty definition, but
            # None is better for Python
            return None


# module-level callable bound into namespaces by bind_functions
plot = Plot()
# On a test system, importing IPython took approx 0.5 seconds, so to avoid that hit
# under most circumstances, we only import it when the user asks for Jupyter
# functionality. Since this occurs within a function, we need a little trickery to
# get the IPython functions into the global namespace so that the plot object can use them
def setoutput(*args):
    """Set the Metview plot output target.

    setoutput('jupyter') enables inline plotting (requires IPython and a
    running Jupyter kernel); any other args are forwarded to the Macro
    setoutput function.
    """
    if 'jupyter' in args:
        try:
            # expose Image/get_ipython at module scope so Plot.__call__ can use them
            global Image
            global get_ipython
            IPython = __import__('IPython', globals(), locals())
            Image = IPython.display.Image
            get_ipython = IPython.get_ipython
        except ImportError as imperr:
            print('Could not import IPython module - plotting to Jupyter will not work')
            raise imperr
        # test whether we're in the Jupyter environment
        if get_ipython() is not None:
            plot.plot_to_jupyter = True
        else:
            print("ERROR: setoutput('jupyter') was set, but we are not in a Jupyter environment")
            raise(Exception('Could not set output to jupyter'))
    else:
        plot.plot_to_jupyter = False
        met_setoutput(*args)
|
import sys
# FIX: raw string - backslashes in a Windows path must not be interpreted as
# escape sequences (the original produced invalid-escape warnings)
sys.path.insert(1, r'D:\Sync\Advanced Database Topics\FinalProject\Max Repo\ADTFinalProject\src\MongoDBAtlasAPI')
import pandas as pd
import datetime
import time
from MongoDBAtlasAPIAuthentication import MongoDBAtlasAPIAuthentication

if __name__ == "__main__":
    # Connect to the Atlas cluster and, for each collection in StockMarket,
    # print every document's formatted date plus a per-collection count.
    mdbaa = MongoDBAtlasAPIAuthentication()
    client = mdbaa.get_mongodb_client()
    # print client state
    db = client.StockMarket
    collection_names = db.list_collection_names()
    for col_num, col in enumerate(collection_names):
        collection = db[col]
        cursor = collection.find({}, {"_id": 0, "companySymbol": 1, "date": 1})
        count = 0
        for document in cursor:
            dateForOneDoc = document.get('date')
            if dateForOneDoc is not None:
                # BUG FIX: count was never incremented, so the final print
                # always reported 0 dated documents
                count += 1
                # print(document.get('companySymbol'))
                formatedDate = datetime.datetime.fromtimestamp(dateForOneDoc).strftime('%c')
                print(formatedDate)
        print(count)
# This Task is the base task that we will be executing as a second step (see task_piping.py)
# In order to make sure this experiment is registered in the platform, you must execute it once.
from clearml import Task

# Register this script as the first task of the task pipe.
task = Task.init('examples', 'Toy Base Task')

# Hyper-parameters: a single example parameter, connected to the task so an
# initiating task can override its value.
params = {'Example_Param': 1}
task.connect(params)

# Print the value to demonstrate it is the value is set by the initiating task.
print("Example_Param is {}".format(params['Example_Param']))
import json
from typing import Iterable
from confluent_kafka.cimpl import Producer
from pk_kafka.consumers.exceptions import MessageValueException
class KafkaProducer:
    """
    Start a new Producer to publish message to topic
    """

    def __init__(self, broker_address, handle_json_message_data=True):
        """
        Init the main class
        :param broker_address: the broker address
        :param handle_json_message_data: True if you want to handle json formatted data, False otherwise
        """
        self.broker_address = broker_address
        self.producer = Producer({'bootstrap.servers': self.broker_address})
        self.handle_json_message_data = handle_json_message_data

    def publish_message(self, topic, message):
        """
        Create the producer, check for provided serializable message
        and publish it to the topic
        :param topic: the topic whose message will published to
        :param message: the message to be published
        :raises MessageValueException: if JSON handling is enabled and the
            message is not a dict or list
        """
        def delivery_report(err, msg):
            """ Called once for each message produced to indicate delivery result.
                Triggered by poll() or flush(). """
            if err is not None:
                print('Message delivery failed: {}'.format(err))
            else:
                print('Message delivered to {} [{}]'.format(msg.topic(), msg.partition()))

        # Trigger any available delivery report callbacks from previous produce() calls
        self.producer.poll(0)

        # Asynchronously produce a message, the delivery report callback
        # will be triggered from poll() above, or flush() below, when the message has
        # been successfully delivered or failed permanently.
        value_to_publish = message
        if self.handle_json_message_data:
            # FIX: use isinstance instead of `type(message) not in (dict, list)`
            # so dict/list subclasses (equally JSON-serializable) are accepted
            if not isinstance(message, (dict, list)):
                raise MessageValueException("Your message should be json serializable!")
            value_to_publish = json.dumps(value_to_publish)
        self.producer.produce(topic, value_to_publish.encode('utf8'), callback=delivery_report)

        # Wait for any outstanding messages to be delivered and delivery report
        # callbacks to be triggered.
        self.producer.flush()

    def publish_messages(self, topic, messages):
        """
        Publish multiple messages
        :param topic: the topic whose messages will be published to
        :param messages: the list of elements to be published
        """
        assert isinstance(messages, Iterable)
        for message in messages:
            self.publish_message(topic, message)
#coding: utf-8
from os import remove as rm
from os.path import basename, dirname, join as path_join, realpath, isdir
from sh import unzip
import tempfile
from osext.filesystem import rmdir_force, sync as dir_sync
import httpext as http
import os
import langutil.php as php
class WordPressError(Exception):
    """Base error for WordPress installation/management failures."""
class WordPressConfigurationError(WordPressError):
    """Raised when required configuration keys are missing or invalid."""
class WordPress:
    """For installing and managing WordPress sites"""
    LATEST_URI = 'http://wordpress.org/latest.zip'
    DL_FORMAT = 'http://wordpress.org/wordpress-%s.zip'
    # Instance state (set in __init__)
    _path = None
    _basename = None
    _dirname = None
    _is_initialized = False
    def __init__(self, path):
        """
        Args:
            path (str): Path to installation. Does not have to exist as
                init_dir() can be used to initialise a new installation.
        """
        self._path = realpath(path)
        self._basename = basename(path)
        self._dirname = dirname(path)
        self._is_initialized = isdir(self._path)
    def _diff_list(self, a, b):
        # Items of b that do not appear in a (returns a lazy filter object).
        return filter(lambda x: x not in a, b)
    def init_dir(self, version='latest', config=None, table_prefix='wp_'):
        """Initialises a new WordPress installation.
        Kwargs:
            version (str): Version number or ``'latest'``.
            config (dict): Configuration. Must have keys ``'db_name'``,
                ``'db_user'``, ``'db_password'`` at minimum.
            table_prefix (str): Table prefix.
        Raises:
            WordPressError, WordPressConfigurationError
        Optional ``config`` keys (all str):
            db_host: Database host.
            db_charset: Database character set (MySQL).
            db_collate: Database collation ('' for default).
            wplang: Language code.
            auth_key: Authentication key.
            secure_auth_key: Secure authentication key.
            logged_in_key: Logged in key.
            nonce_key: Nonce key.
        """
        if self._is_initialized:
            raise WordPressError('Directory %s already exists' % (self._path))
        # `config` used to be a mutable default argument (config={}), which
        # is shared across calls; normalise the None default here instead.
        if config is None:
            config = {}
        uri = self.LATEST_URI
        cache = True
        if version != 'latest':
            uri = self.DL_FORMAT % (version)
            cache = False
        dir_name = path_join(tempfile.gettempdir(), '__wp__')
        http.dl(uri, '_wp.zip', cache=cache)
        unzip(['-d', dir_name, '_wp.zip'])
        rm('_wp.zip')
        dir_name = path_join(dir_name, 'wordpress')
        os.rename(dir_name, self._path)
        defaults = {
            'db_host': 'localhost',
            'db_charset': 'utf8',
            'db_collate': '',
            'wplang': '',
            'auth_key': ',Qi8F3A:ME>+!G*|a!>zbW!GWe,A9rHR@tL.4sFCE}LR0][j/995U'
                        '+4*3H:i]]DH',
            'secure_auth_key': 'UjN_-SP+Whq/^taB31&lg$fj0-<XSgKy@UzK*B-k-4aiT9'
                               '~m^s_vT[dE,5P;kx(E',
            'logged_in_key': '2dfV^z4rJqrSEdQc.ec)KJC UZv$#)OhJKRY~Vj9+]M-]CIB'
                             'L(RvGZ|[C!S|]MOv',
            'nonce_key': '.Ue WG1NN/cKo^MC53$_U0!V>Mtdw-ar$rP8o+;rawQ)B$9LlAAL'
                         '<@GLoXS_POaa',
        }
        keys = [
            'db_name',
            'db_user',
            'db_password',
            'db_host',
            'db_collate',
            'db_charset',
            'auth_key',
            'secure_auth_key',
            'logged_in_key',
            'nonce_key',
            'wplang',
        ]
        line_format = 'define(%s, %s);'
        wp_config_php = ['<?php']
        for key in keys:
            if key in config:
                value = config[key]
            elif key in defaults:
                value = defaults[key]
            else:
                raise WordPressConfigurationError('Configuration key %s is required' %
                                                 (key))
            wp_config_php.append(line_format %
                                 (php.generate_scalar(key.upper()),
                                  php.generate_scalar(value)))
        wp_config_php.append('$table_prefix = %s;' %
                             (php.generate_scalar(table_prefix)))
        lines = '\n'.join(wp_config_php) + '\n'
        lines += '''if (!defined('ABSPATH'))
    define('ABSPATH', dirname(__FILE__) . '/');
require_once(ABSPATH . 'wp-settings.php');'''
        lines += '\n'
        # Text mode: `lines` is a str; the previous 'wb+' binary mode raised
        # TypeError on Python 3 when writing str data.
        with open(path_join(self._path, 'wp-config.php'), 'w+') as f:
            f.write(lines)
        os.remove(path_join(self._path, 'wp-config-sample.php'))
        # Mark initialised so a second init_dir() call fails fast.
        self._is_initialized = True
    def sync_assets(self, remote_path):
        # NotImplementedError is the conventional exception here; it is
        # still an Exception subclass, so existing broad handlers work.
        raise NotImplementedError('Not implemented')
|
# External module imports
import RPi.GPIO as GPIO
import time
import led_control as ledc
from play_song import play_song
"""
Pin list:
GND - black wire) - 6
LED Control - green - GPIO18 - 12
Computer - purple - GPIO17 - 11
Motor 1 - yellow - GPIO23 - 16
Motor 2 - orange - GPIO24 - 18
Power Strip - blue - GPIO22 - 15
"""
# define wire locations (BCM pin numbering, see GPIO.setmode below)
w_led = 18
w_comp = 17
w_motor_1 = 23
w_motor_2 = 24
w_power_strip = 22
# other parameters
# Delay (seconds) between motor steps when toggling the light switch.
light_delay = 0.5
def initialize():
    # Configure all output pins and drive them LOW.
    # NOTE: the `global` declarations are only needed for writes; they are
    # harmless here since these names are read-only in this function.
    global w_comp
    global w_motor_1
    global w_motor_2
    global w_power_strip
    GPIO.setmode(GPIO.BCM)
    GPIO.setup([w_comp, w_motor_1, w_motor_2, w_power_strip], GPIO.OUT)
    GPIO.output([w_comp, w_motor_1, w_motor_2, w_power_strip], GPIO.LOW)
def cleanup():
    # Release all GPIO channels claimed by this process.
    GPIO.cleanup()
def turn_on_lights():
    # Drive the light-switch motor: push one way, then partially back,
    # using light_delay to pace the movement.
    global w_motor_1
    global w_motor_2
    global light_delay
    GPIO.output(w_motor_1, GPIO.LOW)
    GPIO.output(w_motor_2, GPIO.HIGH)
    time.sleep(light_delay + 0.2)
    GPIO.output(w_motor_1, GPIO.HIGH)
    GPIO.output(w_motor_2, GPIO.LOW)
    time.sleep(0.5 * light_delay)
    GPIO.output(w_motor_1, GPIO.LOW)
def turn_off_lights():
    # Mirror of turn_on_lights: drive the motor the opposite direction.
    global w_motor_1
    global w_motor_2
    global light_delay
    GPIO.output(w_motor_2, GPIO.LOW)
    GPIO.output(w_motor_1, GPIO.HIGH)
    time.sleep(light_delay)
    GPIO.output(w_motor_2, GPIO.HIGH)
    GPIO.output(w_motor_1, GPIO.LOW)
    time.sleep(0.5 * light_delay)
    GPIO.output(w_motor_2, GPIO.LOW)
def set_power_strip(state):
    # NOTE(review): LOW when state is True suggests an active-low relay —
    # confirm wiring. Also note `state != True` (equality, not truthiness).
    global w_power_strip
    if state != True:
        GPIO.output(w_power_strip, GPIO.HIGH)
    else:
        GPIO.output(w_power_strip, GPIO.LOW)
def turn_on_computer():
    # Short (1 s) press of the power button line.
    global w_comp
    GPIO.output(w_comp, GPIO.HIGH)
    time.sleep(1)
    GPIO.output(w_comp, GPIO.LOW)
def turn_off_computer():
    # Long (6 s) press to force power-off.
    global w_comp
    GPIO.output(w_comp, GPIO.HIGH)
    time.sleep(6)
    GPIO.output(w_comp, GPIO.LOW)
"""
Given an (object, action) pair, attempts to execute the command. May throw an
error if the pairing is invalid or the command is not successfully executed
"""
def execute_command(cmd, strip, pya):
print(cmd)
obj = cmd[0]
act = cmd[1]
if obj in {'light', 'lights'}:
if act == 'off':
turn_off_lights()
return True
if act == 'on':
turn_on_lights()
return True
if obj in {'computer'}:
if act == 'off':
turn_off_computer()
return True
if act == 'on':
turn_on_computer()
return True
if obj in {'fan'}:
if act == 'off':
set_power_strip(False)
return True
if act == 'on':
set_power_strip(True)
return True
if obj in {'leds', 'led'}:
if act == 'off':
ledc.clear(strip)
return True
if obj in {'leds', 'led', 'color'}:
if act == 'red':
ledc.setRGBColor(strip, (1, 0, 0))
return True
if act == 'orange':
ledc.setRGBColor(strip, (1, 0.5, 0))
return True
if act == 'yellow':
ledc.setRGBColor(strip, (1, 1, 0))
return True
if act == 'green':
ledc.setRGBColor(strip, (0, 1, 0))
return True
if act == 'blue':
ledc.setRGBColor(strip, (0, 0, 1))
return True
if act == 'indigo':
ledc.setRGBColor(strip, (0, 0, 0.5))
return True
if act == 'purple':
ledc.setRGBColor(strip, (1, 0, 1))
return True
if act == 'pink':
ledc.setRGBColor(strip, (1, 0, 0.5))
return True
if act == 'white':
ledc.setRGBColor(strip, (1, 1, 1))
return True
if obj in {'play'}:
if act in {'thunder struck', 'thunderstruck'}:
play_song(strip, 'thunder_struck', pya)
return True
if act == 'sweet dreams':
play_song(strip, 'sweet_dreams', pya)
return True
return False
|
import pandas
# load all necessary libraries
import pylab as pl
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
# loading data
# NOTE(review): hard-coded absolute Windows path — parameterise before reuse.
variables = pandas.read_csv(r'C:\Users\bsidd\Downloads\Python_Lesson6\Python_Lesson6\sample_stocks.csv')
# load the returns and dividendyield columns as Y and X (single-column frames)
Y = variables[['returns']]
X = variables[['dividendyield']]
# candidate cluster counts 1..19
Nc = range(1, 20)
# build one (unfitted) KMeans estimator per candidate cluster count
kmeans = [KMeans(n_clusters=i) for i in Nc]
# bare expression: only echoes in a REPL/notebook, a no-op in a script
kmeans
# fit each estimator on Y and record its score (negative inertia)
score = [kmeans[i].fit(Y).score(Y) for i in range(len(kmeans))]
# bare expression: REPL echo only
score
# plot the score against the number of clusters
pl.plot(Nc,score)
pl.xlabel('Number of Clusters')
pl.ylabel('Score')
# use elbow curve to get the appropriate number of clusters
pl.title('Elbow Curve')
pl.show()
# use PCA to reduce dimensionality and interpret datapoints
pca = PCA(n_components=1).fit(Y)
# transform the x,y using PCA
# NOTE(review): the PCA is fitted on Y (returns) but also used to transform
# X (dividendyield) below — both are 1-D so this runs, but confirm intent.
pca_d = pca.transform(Y)
pca_c = pca.transform(X)
# here the best number of clusters is 3 (chosen from the elbow curve above)
kmeans=KMeans(n_clusters=3)
# fit data using kmeans
kmeansoutput=kmeans.fit(Y)
# bare expression: REPL echo only
kmeansoutput
pl.figure('3 Cluster K-Means')
# scatter all datapoints into respective clusters
pl.scatter(pca_c[:, 0], pca_d[:, 0], c=kmeansoutput.labels_)
pl.xlabel('Dividend Yield')
pl.ylabel('Returns')
pl.show()
|
from django.conf.urls import patterns, url
from .views import FsAuth, FsCallback
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8 and
# removed in 1.10; on modern Django, urlpatterns should be a plain list of
# url()/path() entries. Kept as-is for compatibility with the pinned version.
urlpatterns = patterns('',
    # Receive OAuth token from 4sq.
    url(r'^callback$', FsCallback.as_view(), name='oauth_return'),
    # Authenticate with 4sq using OAuth.
    url(r'^auth$', FsAuth.as_view(), name='oauth_auth'),
)
|
class Solution:
    def minAbsoluteSumDiff(self, nums1: List[int], nums2: List[int]) -> int:
        """Minimum absolute sum difference after replacing at most one
        element of nums1 with another element of nums1.

        Args:
            nums1, nums2: equal-length integer lists.

        Returns:
            The minimised sum of |nums1[i] - nums2[i]|, modulo 1e9+7.
        """
        # Local import keeps this file self-contained (LeetCode style).
        from bisect import bisect_left
        modulo = 10 ** 9 + 7
        total = 0
        best_gain = 0
        # Sorted copy lets us binary-search for the nums1 value closest to
        # each nums2[i], replacing the original O(n^2) inner scan.
        sorted_nums1 = sorted(nums1)
        n = len(sorted_nums1)
        for a, b in zip(nums1, nums2):
            diff = abs(a - b)
            total += diff
            if diff == 0:
                # Replacing here can only help other indices; skip.
                continue
            idx = bisect_left(sorted_nums1, b)
            # Closest candidates are the neighbours of the insertion point.
            if idx < n:
                best_gain = max(best_gain, diff - (sorted_nums1[idx] - b))
            if idx > 0:
                best_gain = max(best_gain, diff - (b - sorted_nums1[idx - 1]))
        return (total - best_gain) % modulo
|
#!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
from geometry_msgs.msg import TwistWithCovarianceStamped
from PID import Controller
def main():
    """Spin up the blimp_controller node and idle at 10 Hz.

    NOTE(review): `controller` is created but never used inside the loop —
    presumably PID updates are driven by subscriber callbacks registered in
    Controller.__init__; confirm against the PID module.
    """
    rospy.init_node('blimp_controller', anonymous=True)
    rate = rospy.Rate(10)  # 10hz
    controller = Controller()
    while not rospy.is_shutdown():
        rate.sleep()
if __name__ == '__main__':
    try:
        main()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the sleep.
        pass
class Module:
    """
    Base element that goes into the network structure.

    The default implementation is the identity: build() echoes the input
    shape, forward()/backward() pass their argument straight through.
    """
    def __init__(self):
        pass
    def build(self, inputs_shape):
        # Default: output shape equals input shape.
        return inputs_shape
    def forward(self, inputs):
        # Default: identity transform.
        return inputs
    def backward(self, delta):
        # Default: gradient passes through unchanged.
        return delta
class Layer(Module):
    """
    Module variant that carries weights needing updates.
    """
    def __init__(self):
        super().__init__()
        # Populated by concrete subclasses during build/forward.
        self.weights = None
        self.bias = None
        self.delta = None
    def update_weight(self, epsilon):
        # No-op by default; concrete layers apply the weight update
        # (epsilon is the learning rate).
        pass
class Activator(Module):
    # Marker base class for activation functions.
    def __init__(self):
        super().__init__()
class Loss:
    # Interface for loss functions.
    def __init__(self):
        pass
    def loss(self, y, y_):
        # Scalar loss between prediction y and target y_.
        raise NotImplementedError
    def delta(self, y, y_):
        # Gradient of the loss w.r.t. the prediction.
        raise NotImplementedError
class Optimizer:
    # Interface/base class for optimizers.
    def __init__(self):
        pass
|
json_file = "manage.json"
guild_id = 582705077572075535
schedule_channel_id = 598833688402198528
#カリンさんを動かすのに必要
zatsudan_channel_id = 582956584624324618
minerva_id = 634687198842716160
clan_dicts = [{"role_id":582952905653485568,
"command_channel":607602971512930345,
"remain_totsu_channel":636913889598242836,
"task_kill_channel":582957215221153795},
{"role_id":629676827568504842,
"command_channel":647959897224511519,
"remain_totsu_channel":654681025879998477,
"task_kill_channel":648152812944883724}]
sunglass = "😎"
#bowing = "🙇♂️"
ok_hand = "👌"
|
import os
import numpy as np
import h5py
import argparse
import time
import logging
import keras
import keras.backend as K
from sklearn import metrics
from keras.models import Model
from keras.optimizers import Adam
from keras.layers import Input, Dense, BatchNormalization, Dropout, Lambda, Activation, Concatenate
#from tensorflow.keras import backend as K
#External .py scripts
from lib import utilities
from lib import data_generator
try:
import cPickle
except BaseException:
import _pickle as cPickle
def evaluateCore(args, model, input, target, stats_dir, probs_dir, iteration):
    """Evaluate a model and log/append mAP, mAUC and timing.

    Args:
        args: dict with at least 'filename' (base name of the CSV log).
        model: keras Model used for prediction.
        input: 3d array, (clips_num, time_steps, freq_bins).
        target: 2d array, (samples_num, classes_num).
        stats_dir: str, directory to write out statistics.
        probs_dir: str, directory to write out probabilities.
        iteration: int, current training iteration (used in dump names).

    Returns:
        None
    """
    utilities.create_folder(stats_dir)
    utilities.create_folder(probs_dir)
    # Predict presence probabilities for each target class.
    callback_time = time.time()
    (clips_num, time_steps, freq_bins) = input.shape
    (input, target) = utilities.transform_data(input, target)
    output = model.predict(input)
    output = output.astype(np.float32)  # (clips_num, classes_num)
    # Probability dumping is currently disabled:
    # prob_path = os.path.join(probs_dir, "prob_{}_iters.p".format(iteration))
    # cPickle.dump(output, open(prob_path, 'wb'))
    # Calculate statistics
    stats = utilities.calculate_stats(output, target)
    # Statistics dumping is currently disabled:
    # stat_path = os.path.join(stats_dir, "stat_{}_iters.p".format(iteration))
    # cPickle.dump(stats, open(stat_path, 'wb'))
    mAP = np.mean([stat['AP'] for stat in stats])
    mAUC = np.mean([stat['auc'] for stat in stats])
    logging.info(
        "mAP: {:.6f}, AUC: {:.6f}, Callback time: {:.3f} s".format(
            mAP, mAUC, time.time() - callback_time))
    writeToFile(args["filename"], str(mAP), 1)
    writeToFile(args["filename"], str(mAUC), 1)
    writeToFile(args["filename"], str(time.time() - callback_time), 1)
    # The former `if False:` logging block referenced prob_path/stat_path,
    # which are never defined while the dumps above stay commented out; it
    # was dead code that would raise NameError if re-enabled, so it has
    # been removed.
def trainCore(args):
    """Train a model.

    Args:
        args: dict with keys 'data_dir', 'workspace', 'mini_data',
            'balance_type', 'learning_rate', 'filename', 'model_type',
            'model' (compiled-ready keras Model) and 'batch_size'.

    Side effects: compiles the model, appends metrics to
    "<filename>.csv" via writeToFile, and saves checkpoints under
    <workspace>/models. Runs until iteration 200001.
    """
    data_dir = args["data_dir"]
    workspace = args["workspace"]
    mini_data = args["mini_data"]
    balance_type = args["balance_type"]
    learning_rate = args["learning_rate"]
    filename = args["filename"]
    model_type = args["model_type"]
    model = args["model"]
    batch_size = args["batch_size"]
    # Path of hdf5 data
    bal_train_hdf5_path = os.path.join(data_dir, "bal_train.h5")
    unbal_train_hdf5_path = os.path.join(data_dir, "unbal_train.h5")
    test_hdf5_path = os.path.join(data_dir, "eval.h5")
    # Load data
    load_time = time.time()
    if mini_data:
        # Only load balanced data
        (bal_train_x, bal_train_y, bal_train_id_list) = utilities.load_data(
            bal_train_hdf5_path)
        train_x = bal_train_x
        train_y = bal_train_y
        train_id_list = bal_train_id_list
    else:
        # Load both balanced and unbalanced data
        (bal_train_x, bal_train_y, bal_train_id_list) = utilities.load_data(
            bal_train_hdf5_path)
        (unbal_train_x, unbal_train_y, unbal_train_id_list) = utilities.load_data(
            unbal_train_hdf5_path)
        train_x = np.concatenate((bal_train_x, unbal_train_x))
        train_y = np.concatenate((bal_train_y, unbal_train_y))
        train_id_list = bal_train_id_list + unbal_train_id_list
    # Test data
    (test_x, test_y, test_id_list) = utilities.load_data(test_hdf5_path)
    logging.info("Loading data time: {:.3f} s".format(time.time() - load_time))
    logging.info("Training data shape: {}".format(train_x.shape))
    # Optimization method
    optimizer = Adam(lr=learning_rate)
    model.compile(loss='binary_crossentropy', optimizer=optimizer)
    # Output directories
    sub_dir = os.path.join(filename,
                           'balance_type={}'.format(balance_type),
                           'model_type={}'.format(model_type))
    models_dir = os.path.join(workspace, "models", sub_dir)
    utilities.create_folder(models_dir)
    stats_dir = os.path.join(workspace, "stats", sub_dir)
    utilities.create_folder(stats_dir)
    probs_dir = os.path.join(workspace, "probs", sub_dir)
    utilities.create_folder(probs_dir)
    # Data generator
    if balance_type == 'no_balance':
        DataGenerator = data_generator.VanillaDataGenerator
    elif balance_type == 'balance_in_batch':
        DataGenerator = data_generator.BalancedDataGenerator
    else:
        raise Exception("Incorrect balance_type!")
    train_gen = DataGenerator(
        x=train_x,
        y=train_y,
        batch_size=batch_size,
        shuffle=True,
        seed=1234)
    iteration = 0
    call_freq = 1000
    train_time = time.time()
    # CSV header. NOTE(review): "traintTime" typo is part of the emitted
    # header; left unchanged so existing logs/parsers keep working.
    writeToFile(args["filename"], "iteration,traintTime,trainmAP,trainAUC,trainCallbackTime,testmAP,testAUC,testCallbackTime,None\n",0)
    for (batch_x, batch_y) in train_gen.generate():
        # Compute stats every several interations
        if iteration % call_freq == 0:
            logging.info("------------------")
            logging.info(
                "Iteration: {}, train time: {:.3f} s".format(
                    iteration, time.time() - train_time))
            writeToFile(args["filename"], str(iteration),1)
            writeToFile(args["filename"], str(time.time() - train_time),1)
            logging.info("Balance train statistics:")
            evaluateCore(
                args=args,
                model=model,
                input=bal_train_x,
                target=bal_train_y,
                stats_dir=os.path.join(stats_dir, 'bal_train'),
                probs_dir=os.path.join(probs_dir, 'bal_train'),
                iteration=iteration
            )
            logging.info("Test statistics:")
            evaluateCore(
                args=args,
                model=model,
                input=test_x,
                target=test_y,
                stats_dir=os.path.join(stats_dir, "test"),
                probs_dir=os.path.join(probs_dir, "test"),
                iteration=iteration
            )
            writeToFile(args["filename"], '\n',0)
            train_time = time.time()
        # Update params
        (batch_x, batch_y) = utilities.transform_data(batch_x, batch_y)
        model.train_on_batch(x=batch_x, y=batch_y)
        iteration += 1
        # Save model
        save_out_path = os.path.join(
            models_dir, "md_{}_iters.h5".format(iteration))
        #NEW checkpoint every 2000 iterations
        if (iteration%2000 == 0):
            model.save(save_out_path)
        # Stop training when maximum iteration achieves
        if iteration == 200001:
            break
def average_pooling(inputs, **kwargs):
    """Mean over the time axis of the first (and only) Lambda input."""
    input = inputs[0]   # (batch_size, time_steps, freq_bins)
    return K.mean(input, axis=1)
def max_pooling(inputs, **kwargs):
    """Max over the time axis of the first (and only) Lambda input."""
    input = inputs[0]   # (batch_size, time_steps, freq_bins)
    return K.max(input, axis=1)
def attention_pooling(inputs, **kwargs):
    """Attention-weighted sum over time: normalise `att` along the time
    axis and use it to weight `out` (see Kong et al., arXiv:1711.00927)."""
    [out, att] = inputs
    # Clip so the normalisation below never divides by zero.
    epsilon = 1e-7
    att = K.clip(att, epsilon, 1. - epsilon)
    normalized_att = att / K.sum(att, axis=1)[:, None, :]
    return K.sum(out * normalized_att, axis=1)
def pooling_shape(input_shape):
    """Output shape of a pooling Lambda: the time axis is removed.

    Accepts either one (samples, time_steps, freq_bins) tuple or a list of
    such tuples (multi-input Lambda); only the first entry is used.
    """
    shape = input_shape[0] if isinstance(input_shape, list) else input_shape
    sample_num, _time_steps, freq_bins = shape
    return (sample_num, freq_bins)
def train(args, option=1):
    """Build the keras model described by args["model_type"]; then either
    train it (option=0, delegates to trainCore) or return it (option=1).

    Args:
        args: dict; reads 'model_type', writes 'model' and 'batch_size'.
        option: 0 to train, 1 to just build and return the model.
    """
    model_type = args["model_type"]
    # Fixed AudioSet embedding dimensions: 10 frames of 128-d features,
    # 527 output classes.
    time_steps = 10
    freq_bins = 128
    classes_num = 527
    # Hyper parameters
    hidden_units = 1024
    drop_rate = 0.5
    batch_size = 500
    # Embedded layers: three Dense+BN+ReLU+Dropout blocks.
    input_layer = Input(shape=(time_steps, freq_bins))
    a1 = Dense(hidden_units)(input_layer)
    a1 = BatchNormalization()(a1)
    a1 = Activation('relu')(a1)
    a1 = Dropout(drop_rate)(a1)
    a2 = Dense(hidden_units)(a1)
    a2 = BatchNormalization()(a2)
    a2 = Activation('relu')(a2)
    a2 = Dropout(drop_rate)(a2)
    a3 = Dense(hidden_units)(a2)
    a3 = BatchNormalization()(a3)
    a3 = Activation('relu')(a3)
    a3 = Dropout(drop_rate)(a3)
    # Pooling layers
    if model_type == 'decision_level_max_pooling':
        '''Global max pooling.
        [1] Choi, Keunwoo, et al. "Automatic tagging using deep convolutional
        neural networks." arXiv preprint arXiv:1606.00298 (2016).
        '''
        cla = Dense(classes_num, activation='sigmoid')(a3)
        output_layer = Lambda(max_pooling, output_shape=pooling_shape)([cla])
    elif model_type == 'decision_level_average_pooling':
        '''Global average pooling.
        [2] Lin, Min, et al. Qiang Chen, and Shuicheng Yan. "Network in
        network." arXiv preprint arXiv:1312.4400 (2013).
        '''
        cla = Dense(classes_num, activation='sigmoid')(a3)
        output_layer = Lambda(
            average_pooling,
            output_shape=pooling_shape)(
            [cla])
    elif model_type == 'custom':
        '''Our model.
        [2] Lin, Min, et al. Qiang Chen, and Shuicheng Yan. "Network in
        network." arXiv preprint arXiv:1312.4400 (2013).
        '''
        # NOTE(review): gaussian_normalization is not defined anywhere in
        # this file — selecting model_type='custom' raises NameError.
        cla = Dense(classes_num, activation='softmax')(a3)
        output_layer = Lambda(
            gaussian_normalization,
            output_shape=pooling_shape)(
            [cla])
    elif model_type == 'decision_level_single_attention':
        '''Decision level single attention pooling.
        [3] Kong, Qiuqiang, et al. "Audio Set classification with attention
        model: A probabilistic perspective." arXiv preprint arXiv:1711.00927
        (2017).
        '''
        cla = Dense(classes_num, activation='sigmoid')(a3)
        att = Dense(classes_num, activation='softmax')(a3)
        output_layer = Lambda(
            attention_pooling, output_shape=pooling_shape)([cla, att])
    elif model_type == 'decision_level_multi_attention':
        '''Decision level multi attention pooling.
        [4] Yu, Changsong, et al. "Multi-level Attention Model for Weakly
        Supervised Audio Classification." arXiv preprint arXiv:1803.02353
        (2018).
        '''
        cla1 = Dense(classes_num, activation='sigmoid')(a2)
        att1 = Dense(classes_num, activation='softmax')(a2)
        out1 = Lambda(
            attention_pooling, output_shape=pooling_shape)([cla1, att1])
        cla2 = Dense(classes_num, activation='sigmoid')(a3)
        att2 = Dense(classes_num, activation='softmax')(a3)
        out2 = Lambda(
            attention_pooling, output_shape=pooling_shape)([cla2, att2])
        b1 = Concatenate(axis=-1)([out1, out2])
        b1 = Dense(classes_num)(b1)
        output_layer = Activation('sigmoid')(b1)
    elif model_type == 'feature_level_attention':
        '''Feature level attention.
        [1] To be appear.
        '''
        cla = Dense(hidden_units, activation='linear')(a3)
        att = Dense(hidden_units, activation='sigmoid')(a3)
        b1 = Lambda(
            attention_pooling, output_shape=pooling_shape)([cla, att])
        b1 = BatchNormalization()(b1)
        b1 = Activation(activation='relu')(b1)
        b1 = Dropout(drop_rate)(b1)
        output_layer = Dense(classes_num, activation='sigmoid')(b1)
    else:
        raise Exception("Incorrect model_type!")
    # Build model
    model = Model(inputs=input_layer, outputs=output_layer)
    model.summary()
    model
    args["model"] = model
    args["batch_size"] = batch_size
    if(option==0):
        # Train
        trainCore(args)
    elif(option==1):
        # Load
        return model
def writeToFile(fileName, string, mode):
    """Append *string* to "<fileName>.csv".

    Args:
        fileName: base file name; ".csv" is appended.
        string: text to append.
        mode: 0 writes the text verbatim; 1 appends a trailing comma
            (CSV-cell style); any other value writes nothing (preserved
            from the original behaviour).
    """
    with open(fileName + ".csv", "a") as f:
        # The original wrapped the str in "".join(...), which is a no-op;
        # write directly instead.
        if mode == 0:
            f.write(string)
        elif mode == 1:
            f.write(string + ",")
|
from abc import abstractproperty
from vmanage.entity import HelperModel,Model,ModelFactory
from vmanage.policy.model import Definition,CommonDefinition,SequencedDefinition
from vmanage.policy.model import Policy,GUIPolicy,CLIPolicy
from vmanage.policy.model import DefinitionApplication,SequencedDefinition
from vmanage.policy.model import DefinitionSequenceElement
from vmanage.policy.model import DefinitionActionElementFactory
from vmanage.policy.model import DefinitionMultiActionElement,DefinitionUniActionElement,DefinitionActionElement
from vmanage.policy.model import DefinitionActionEntry,DefinitionActionValuedEntry,DefinitionActionEntryFactory
from vmanage.policy.model import DefinitionActionReferenceEntry
from vmanage.policy.tool import DefinitionType,PolicyType
from vmanage.policy.tool import accumulator,ReferenceType
from vmanage.policy.centralized.tool import CentralizedReferences,CentralizedDefinitions
class CentralizedGUIPolicy(GUIPolicy):
    """GUI (feature) centralized policy: walks its assembly to collect
    list references and definition IDs."""
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        # Only reference-bearing applications contribute references.
        for application in self.assembly:
            if isinstance(application,DefinitionReferenceApplication):
                application.references(accumulator=accumulator)
        return accumulator
    @accumulator(CentralizedDefinitions)
    def definitions(self,accumulator:CentralizedDefinitions=None):
        # Every assembly entry names a definition by (type, id).
        for application in self.assembly:
            accumulator.add_by_type(application.type,application.id)
        return accumulator
    @property
    def assembly(self):
        # Lazy generator of application models built from the raw entries.
        # NOTE(review): DefinitionApplicationFactory is not defined in this
        # file's visible scope — confirm it is imported/defined elsewhere.
        factory = DefinitionApplicationFactory()
        return (
            factory.from_dict(entry)
            for entry in self.definition.get(GUIPolicy.ASSEMBLY_FIELD)
        )
class PolicyFactory(ModelFactory):
    """Builds the concrete Policy model (CLI or centralized GUI) for a
    raw policy document."""
    def from_dict(self,document:dict):
        policy_type = PolicyType(document.get(Policy.TYPE_FIELD))
        if policy_type == PolicyType.CLI:
            return CLIPolicy.from_dict(document)
        elif policy_type == PolicyType.FEATURE:
            return CentralizedGUIPolicy.from_dict(document)
        raise ValueError("Unsupported Policy Type: {0}".format(policy_type))
class CentralizedSequencedDefinition(SequencedDefinition):
    """Sequenced definition whose sequences are centralized-flavoured."""
    @property
    def sequences(self):
        # Lazy generator over the raw sequence documents.
        return (
            CentralizedSequenceElement(sequence)
            for sequence in self.definition
        )
class CentralizedSequenceElement(DefinitionSequenceElement):
    """Sequence element that builds centralized action models."""
    @property
    def actions(self):
        factory = CentralizedActionElementFactory()
        return (
            factory.from_dict(entry)
            for entry in self.definition.get(DefinitionSequenceElement.ACTIONS_FIELD)
        )
class CentralizedDefUniActionElement(DefinitionUniActionElement):
    """Single-parameter action element (centralized flavour)."""
    @property
    def parameter(self):
        factory = CentralizedActionEntryFactory()
        return factory.from_dict(self.definition.get(DefinitionActionElement.PARAMETER_FIELD))
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        # Service-entry parameters may carry TLOC-list references.
        if isinstance(self.parameter,DefinitionActionServiceEntry):
            self.parameter.references(accumulator=accumulator)
        super().references(accumulator=accumulator)
        return accumulator
class CentralizedDefMultiActionElement(DefinitionMultiActionElement):
    """Multi-parameter action element (centralized flavour)."""
    @property
    def parameters(self):
        factory = CentralizedActionEntryFactory()
        return (
            factory.from_dict(entry)
            for entry in self.definition.get(DefinitionActionElement.PARAMETER_FIELD)
        )
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        for param in self.parameters:
            if isinstance(param,DefinitionActionServiceEntry):
                param.references(accumulator=accumulator)
        super().references(accumulator=accumulator)
        return accumulator
class SLAClassActionElement(CentralizedDefMultiActionElement):
    """slaClass action: its parameters are SLA-class entries."""
    TYPE = "slaClass"
    @property
    def parameters(self):
        factory = DefinitionSLAClassActionEntryFactory()
        return (
            factory.from_dict(entry)
            for entry in self.definition.get(DefinitionActionElement.PARAMETER_FIELD)
        )
class CentralizedActionElementFactory(DefinitionActionElementFactory):
    """Chooses the action-element model by the entry's type and the
    shape (dict vs list) of its parameter field."""
    def from_dict(self,document:dict):
        action_type = document.get(DefinitionActionElement.TYPE_FIELD)
        parameter = document.get(DefinitionActionElement.PARAMETER_FIELD)
        if action_type == SLAClassActionElement.TYPE:
            return SLAClassActionElement(document)
        elif isinstance(parameter,dict):
            # Single parameter object -> uni-action element.
            return CentralizedDefUniActionElement(document)
        elif isinstance(parameter,list):
            # List of parameter objects -> multi-action element.
            return CentralizedDefMultiActionElement(document)
        return super().from_dict(document)
class DefinitionActionServiceEntry(DefinitionActionValuedEntry):
    """'service' action entry whose value describes an ActionService."""
    TYPE = "service"
    @property
    def service(self):
        factory = ActionServiceFactory()
        return factory.from_dict(self.value)
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        # Only reference-style services contribute TLOC-list references.
        if isinstance(self.service,ReferenceActionService):
            self.service.references(accumulator=accumulator)
        return accumulator
class ActionService(HelperModel):
    """Service value of a 'service' action entry."""
    TYPE_FIELD = "type"
    VPN_FIELD = "vpn"
    TLOC_FIELD = "tloc"
    TLOC_LIST_FIELD = "tlocList"
    @property
    def type(self):
        return self.definition.get(ActionService.TYPE_FIELD)
    @property
    def vpn(self):
        return self.definition.get(ActionService.VPN_FIELD)
class ReferenceActionService(ActionService):
    """ActionService whose TLOC is given as a TLOC-list reference."""
    REFERENCE_VALUE = "ref"
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.tloc_lists.add(self.tloc_list)
        return accumulator
    @property
    def tloc_list(self):
        # The referenced list id lives under tlocList.ref.
        return self.definition.get(ActionService.TLOC_LIST_FIELD,{}).get(ReferenceActionService.REFERENCE_VALUE)
class ValuedActionService(ActionService):
    """ActionService whose TLOC is given inline as a value (not a ref)."""
    @property
    def tloc(self):
        # BUG FIX: the original body was missing `return`, so this
        # property always evaluated to None.
        return self.definition.get(ActionService.TLOC_FIELD)
class ActionServiceFactory:
    """Chooses the ActionService flavour from which TLOC field is set."""
    def from_dict(self,document:dict):
        if document.get(ActionService.TLOC_FIELD):
            return ValuedActionService(document)
        elif document.get(ActionService.TLOC_LIST_FIELD):
            return ReferenceActionService(document)
        return ActionService(document)
class CentralizedActionEntryFactory(DefinitionActionEntryFactory):
    """Adds 'service' entry support on top of the base entry factory."""
    def from_dict(self,document:dict):
        action_type = document.get(DefinitionActionEntry.FIELDTYPE_FIELD)
        if action_type == DefinitionActionServiceEntry.TYPE:
            return DefinitionActionServiceEntry(document)
        return super().from_dict(document)
class DefinitionSLAClassActionEntry(DefinitionActionReferenceEntry):
    """SLA-class reference entry: records the referenced SLA class."""
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.add_by_type(ReferenceType.SLA_CLASS,self.definition.get(DefinitionActionEntry.REFERENCE_FIELD))
        return accumulator
class DefinitionSLAClassActionEntryFactory(CentralizedActionEntryFactory):
    """Maps the 'name' field type to an SLA-class reference entry."""
    def from_dict(self,document:dict):
        action_type = document.get(DefinitionActionEntry.FIELDTYPE_FIELD)
        if action_type == "name":
            return DefinitionSLAClassActionEntry(document)
        return super().from_dict(document)
class HubNSpokeDefinition(CommonDefinition):
    """Hub-and-spoke topology definition: one VPN list plus sub-definitions."""
    TYPE = DefinitionType.HUB_N_SPOKE
    SUBDEFINITIONS_FIELD = "subDefinitions"
    VPNLIST_FIELD = "vpnList"
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.vpn_lists.add(self.vpn_list)
        for subdef in self.sub_definitions:
            subdef.references(accumulator=accumulator)
        return accumulator
    @property
    def vpn_list(self):
        return self.definition.get(HubNSpokeDefinition.VPNLIST_FIELD)
    @property
    def sub_definitions(self):
        return (
            HubNSpokeSubDefinition(definition)
            for definition in self.definition.get(HubNSpokeDefinition.SUBDEFINITIONS_FIELD)
        )
class HubNSpokeSubDefinition(HelperModel):
    """One hub-and-spoke sub-topology: optional TLOC list plus spokes."""
    SPOKES_FIELD = "spokes"
    TLOCLIST_FIELD = "tlocList"
    @property
    def spokes(self):
        return (
            HubNSpokeSpokeElement(definition)
            for definition in self.definition.get(HubNSpokeSubDefinition.SPOKES_FIELD,[])
        )
    @property
    def tloc_list(self):
        return self.definition.get(HubNSpokeSubDefinition.TLOCLIST_FIELD)
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        # tlocList is optional on sub-definitions.
        if self.tloc_list:
            accumulator.tloc_lists.add(self.tloc_list)
        for spoke in self.spokes:
            spoke.references(accumulator=accumulator)
        return accumulator
class HubNSpokeSpokeElement(HelperModel):
    """A spoke: its site list and the hubs it attaches to."""
    SITELIST_FIELD = "siteList"
    HUBS_FIELD = "hubs"
    @property
    def site_list(self):
        return self.definition.get(HubNSpokeSpokeElement.SITELIST_FIELD)
    @property
    def hubs(self):
        return (
            HubNSpokeHubElement(definition)
            for definition in self.definition.get(HubNSpokeSpokeElement.HUBS_FIELD,[])
        )
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.site_lists.add(self.site_list)
        for hub in self.hubs:
            hub.references(accumulator=accumulator)
        return accumulator
class HubNSpokeHubElement(HelperModel):
    """A hub: its site list plus any advertised prefix lists."""
    SITELIST_FIELD = "siteList"
    PREFIXLIST_FIELD = "prefixLists"
    @property
    def site_list(self):
        return self.definition.get(HubNSpokeHubElement.SITELIST_FIELD)
    @property
    def prefix_lists(self):
        return self.definition.get(HubNSpokeHubElement.PREFIXLIST_FIELD,[])
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.site_lists.add(self.site_list)
        accumulator.prefix_lists.update(self.prefix_lists)
        return accumulator
class MeshDefinition(CommonDefinition):
    """Full-mesh topology definition: one VPN list plus regions."""
    TYPE = DefinitionType.MESH
    REGIONS_FIELD = "regions"
    VPNLIST_FIELD = "vpnList"
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.vpn_lists.add(self.vpn_list)
        for region in self.regions:
            region.references(accumulator=accumulator)
        return accumulator
    @property
    def vpn_list(self):
        return self.definition.get(MeshDefinition.VPNLIST_FIELD)
    @property
    def regions(self):
        return (MeshRegionElement(definition)
                for definition in self.definition.get(MeshDefinition.REGIONS_FIELD,[]))
class MeshRegionElement(HelperModel):
    """One mesh region: the site lists it spans."""
    SITELISTS_FIELD = "siteLists"
    @property
    def site_lists(self):
        return self.definition.get(MeshRegionElement.SITELISTS_FIELD,[])
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.site_lists.update(self.site_lists)
        return accumulator
class ControlDefinition(CentralizedSequencedDefinition):
    """Sequenced control (topology) policy definition."""
    TYPE = DefinitionType.CONTROL
class VPNMembershipDefinition(CommonDefinition):
    """VPN membership definition: per-site allowed VPN lists."""
    TYPE = DefinitionType.VPN_MEMBERSHIP
    SITES_FIELD = "sites"
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        for site in self.sites:
            site.references(accumulator=accumulator)
        return accumulator
    @property
    def sites(self):
        return (
            VPNMembershipSiteElement(site)
            for site in self.definition[VPNMembershipDefinition.SITES_FIELD]
        )
class VPNMembershipSiteElement(HelperModel):
    """One site entry: its site list plus associated VPN lists."""
    SITELIST_FIELD = "siteList"
    VPNLISTS_FIELD = "vpnList"
    @accumulator(CentralizedReferences)
    def references(self,accumulator:CentralizedReferences=None):
        accumulator.site_lists.add(self.site_list)
        accumulator.vpn_lists.update(self.vpn_lists)
        return accumulator
    @property
    def site_list(self):
        return self.definition[VPNMembershipSiteElement.SITELIST_FIELD]
    @property
    def vpn_lists(self):
        return self.definition[VPNMembershipSiteElement.VPNLISTS_FIELD]
class AppRouteDefinition(CentralizedSequencedDefinition):
    """Sequenced application-aware routing policy definition."""
    TYPE = DefinitionType.APP_ROUTE
class DataDefinition(CentralizedSequencedDefinition):
    """Sequenced data policy definition."""
    TYPE = DefinitionType.DATA
class CflowdDefinition(CommonDefinition):
    """Cflowd (flow export) policy definition."""
    TYPE = DefinitionType.CFLOWD
class DefinitionFactory(ModelFactory):
    """Builds the concrete centralized Definition model for a document."""
    def from_dict(self,document:dict):
        # DefinitionType -> concrete model class dispatch table; the
        # DefinitionType(...) constructor has already rejected unknown
        # raw values before we get here.
        dispatch = {
            DefinitionType.HUB_N_SPOKE: HubNSpokeDefinition,
            DefinitionType.MESH: MeshDefinition,
            DefinitionType.CONTROL: ControlDefinition,
            DefinitionType.VPN_MEMBERSHIP: VPNMembershipDefinition,
            DefinitionType.APP_ROUTE: AppRouteDefinition,
            DefinitionType.DATA: DataDefinition,
            DefinitionType.CFLOWD: CflowdDefinition,
        }
        doc_type = DefinitionType(document.get(Definition.TYPE_FIELD))
        model = dispatch.get(doc_type)
        if model is None:
            raise ValueError("Unsupported Definition Type: {0}".format(doc_type))
        return model.from_dict(document)
class DefinitionReferenceApplication(DefinitionApplication):
    """Base for policy applications whose 'entries' contribute references."""
    ENTRIES_FIELD = "entries"

    @abstractproperty
    def entries(self):
        # Subclasses yield wrapper objects exposing references(accumulator=...).
        pass

    @accumulator(CentralizedReferences)
    def references(self, accumulator: CentralizedReferences = None):
        # Fold every entry's references into the shared accumulator.
        for entry in self.entries:
            entry.references(accumulator=accumulator)
        return accumulator
class ControlPolicyApplication(DefinitionReferenceApplication):
    """Application wrapper for control-policy definitions."""
    DEFINITION = ControlDefinition

    @property
    def entries(self):
        """Wrap each raw 'entries' item as a ControlDirectionApplication."""
        raw_entries = self.definition.get(DefinitionReferenceApplication.ENTRIES_FIELD)
        return (ControlDirectionApplication(raw) for raw in raw_entries)
class ControlDirectionApplication(HelperModel):
    """One 'direction' entry of a control-policy application."""
    DIRECTION_FIELD = "direction"
    SITELISTS_FIELD = "siteLists"

    @accumulator(CentralizedReferences)
    def references(self, accumulator: CentralizedReferences = None):
        """Record this entry's site-lists in *accumulator* and return it."""
        accumulator.site_lists.update(self.site_lists)
        return accumulator

    @property
    def direction(self):
        """Value of the 'direction' field (None when absent)."""
        return self.definition.get(ControlDirectionApplication.DIRECTION_FIELD)

    @property
    def site_lists(self):
        """Value of the 'siteLists' field (empty list when absent)."""
        return self.definition.get(ControlDirectionApplication.SITELISTS_FIELD, [])
class DataPolicyApplication(DefinitionReferenceApplication):
    """Application wrapper for data-policy definitions."""
    DEFINITION = DataDefinition

    @property
    def entries(self):
        """Wrap each raw 'entries' item as a DataDirectionApplication."""
        raw_entries = self.definition.get(DefinitionReferenceApplication.ENTRIES_FIELD)
        return (DataDirectionApplication(raw) for raw in raw_entries)
class DataDirectionApplication(HelperModel):
    """One 'direction' entry of a data-policy application."""
    DIRECTION_FIELD = "direction"
    SITELISTS_FIELD = "siteLists"
    VPNLISTS_FIELD = "vpnLists"

    @accumulator(CentralizedReferences)
    def references(self, accumulator: CentralizedReferences = None):
        """Record this entry's site-lists and vpn-lists in *accumulator*."""
        accumulator.site_lists.update(self.site_lists)
        accumulator.vpn_lists.update(self.vpn_lists)
        return accumulator

    @property
    def direction(self):
        """Value of the 'direction' field (None when absent)."""
        return self.definition.get(DataDirectionApplication.DIRECTION_FIELD)

    @property
    def site_lists(self):
        """Value of the 'siteLists' field (empty list when absent)."""
        return self.definition.get(DataDirectionApplication.SITELISTS_FIELD, [])

    @property
    def vpn_lists(self):
        """Value of the 'vpnLists' field (empty list when absent)."""
        return self.definition.get(DataDirectionApplication.VPNLISTS_FIELD, [])
class CflowdPolicyApplication(DefinitionReferenceApplication):
    """Application wrapper for cflowd definitions."""
    DEFINITION = CflowdDefinition

    @property
    def entries(self):
        """Wrap each raw 'entries' item as a CflowdApplicationEntry."""
        raw_entries = self.definition.get(DefinitionReferenceApplication.ENTRIES_FIELD)
        return (CflowdApplicationEntry(raw) for raw in raw_entries)
class CflowdApplicationEntry(HelperModel):
    """One entry of a cflowd-policy application."""
    SITELISTS_FIELD = "siteLists"

    @accumulator(CentralizedReferences)
    def references(self, accumulator: CentralizedReferences = None):
        """Record this entry's site-lists in *accumulator* and return it."""
        accumulator.site_lists.update(self.site_lists)
        return accumulator

    @property
    def site_lists(self):
        """Value of the 'siteLists' field (empty list when absent)."""
        return self.definition.get(CflowdApplicationEntry.SITELISTS_FIELD, [])
class AppRoutePolicyApplication(DefinitionReferenceApplication):
    """Application wrapper for app-route definitions."""
    DEFINITION = AppRouteDefinition

    @property
    def entries(self):
        """Wrap each raw 'entries' item as an AppRouteApplicationEntry."""
        raw_entries = self.definition.get(DefinitionReferenceApplication.ENTRIES_FIELD)
        return (AppRouteApplicationEntry(raw) for raw in raw_entries)
class AppRouteApplicationEntry(HelperModel):
    """One entry of an app-route-policy application."""
    SITELISTS_FIELD = "siteLists"
    VPNLISTS_FIELD = "vpnLists"

    @accumulator(CentralizedReferences)
    def references(self, accumulator: CentralizedReferences = None):
        """Record this entry's site-lists and vpn-lists in *accumulator*."""
        accumulator.site_lists.update(self.site_lists)
        accumulator.vpn_lists.update(self.vpn_lists)
        return accumulator

    @property
    def site_lists(self):
        """Value of the 'siteLists' field (empty list when absent)."""
        return self.definition.get(AppRouteApplicationEntry.SITELISTS_FIELD, [])

    @property
    def vpn_lists(self):
        """Value of the 'vpnLists' field (empty list when absent)."""
        return self.definition.get(AppRouteApplicationEntry.VPNLISTS_FIELD, [])
class DefinitionApplicationFactory:
    """Instantiate the policy-application wrapper matching a document's type."""

    def from_dict(self, document: dict):
        """Wrap *document* in its specific *PolicyApplication, or the generic one."""
        doc_type = DefinitionType(document.get(DefinitionApplication.TYPE_FIELD))
        # Dispatch table replaces the original if/elif ladder; unknown types fall
        # back to the plain DefinitionApplication, exactly as before.
        application_by_type = {
            ControlDefinition.TYPE: ControlPolicyApplication,
            DataDefinition.TYPE: DataPolicyApplication,
            CflowdDefinition.TYPE: CflowdPolicyApplication,
            AppRouteDefinition.TYPE: AppRoutePolicyApplication,
        }
        return application_by_type.get(doc_type, DefinitionApplication)(document)
# -*- coding: Cp1250 -*-
from data import locations
from data.turnus_type import TurnusType
from data.general import DataContainer
import os
import sys
import csv
def input_turnus_types():
FILES_DIR = os.path.join('persistence', 'data', 'start_data')
FILE_NAME = 'vrste_turnusov.csv'
print 'Zacetek vzpostavljanja zacetnega stanja vrst turnusov.'
print 'Zaceli boste z vnosom vrst turnusov.'
sys.stdout.write('\tBranje datoteke...')
f = file(os.path.join(FILES_DIR, FILE_NAME), 'rb')
reader = csv.reader(f)
sys.stdout.write('OK\n')
turnus_types = []
for row in reader:
sys.stdout.write('\tUstvarjanje vrste turnusa: ' + str(row) + '...')
turnus_types.append(TurnusType(*row))
sys.stdout.write('OK\n')
sys.stdout.write('\tBrisanje starih in pisanje novih vrst turnusov ...')
tc = DataContainer(locations.TURNUS_TYPE_DATA, TurnusType, sorted(turnus_types))
tc.save()
sys.stdout.write('OK\n')
print 'Konec vnosa vrst turnsov.'
print 'Zacetno stanje vrst turnusov vzpostavljeno.\n' |
import torch
from torch.utils.data import Dataset
import os
import numpy as np
import cv2
import matplotlib.pyplot as plt
class KneeMRI(Dataset):
    """Dataset pairing clean ('target') knee-MRI slices with 1 or 2 noisy versions.

    Each file holds a tensor saved with torch.save; slices are assumed to be
    320x320 (TODO confirm) and are split into four 160x160 quadrants.
    """

    def __init__(self, target_dir, noise_dirs):
        """target_dir: folder of clean slices; noise_dirs: list of 1 or 2 noisy folders."""
        self.target_dir = target_dir
        self.noise_dirs = noise_dirs
        self.target_files = []
        self.noise1_files = []
        if len(noise_dirs) > 1:
            self.noise2_files = []
        self._set_files()

    def _set_files(self):
        # Fix: sort the listings so target/noise files pair up by name.
        # os.listdir() order is arbitrary, so the original pairing between
        # directories was unreliable.
        self.target_files = sorted(os.listdir(self.target_dir))
        self.noise1_files = sorted(os.listdir(self.noise_dirs[0]))
        if len(self.noise_dirs) > 1:
            self.noise2_files = sorted(os.listdir(self.noise_dirs[1]))

    @staticmethod
    def _quadrants(tensor):
        """Split a (1, 320, 320) tensor into its four 160x160 quadrants."""
        return (tensor[:, 0:160, 0:160], tensor[:, 160:320, 0:160],
                tensor[:, 0:160, 160:320], tensor[:, 160:320, 160:320])

    def __len__(self):
        return len(self.target_files)

    def __getitem__(self, index):
        """Return quadrant tensors (noisy first, then clean) plus the filename.

        1 noise dir -> 4 noisy + 4 clean + name; 2 noise dirs -> 4+4 noisy +
        4 clean + name. (Renamed the original 'slice' local, which shadowed
        the builtin.)
        """
        target_path = os.path.join(self.target_dir, self.target_files[index])
        clean = torch.unsqueeze(torch.load(target_path), 0)
        c1, c2, c3, c4 = self._quadrants(clean)
        noise1_path = os.path.join(self.noise_dirs[0], self.noise1_files[index])
        noisy1 = torch.unsqueeze(torch.load(noise1_path), 0)
        a1, a2, a3, a4 = self._quadrants(noisy1)
        if len(self.noise_dirs) == 1:
            return (a1.float(), a2.float(), a3.float(), a4.float(),
                    c1.float(), c2.float(), c3.float(), c4.float(),
                    self.target_files[index])
        if len(self.noise_dirs) == 2:
            noise2_path = os.path.join(self.noise_dirs[1], self.noise2_files[index])
            noisy2 = torch.unsqueeze(torch.load(noise2_path), 0)
            b1, b2, b3, b4 = self._quadrants(noisy2)
            return (a1.float(), a2.float(), a3.float(), a4.float(),
                    b1.float(), b2.float(), b3.float(), b4.float(),
                    c1.float(), c2.float(), c3.float(), c4.float(),
                    self.target_files[index])
from django.contrib import admin
# Branding for the Django admin: page header and browser title.
admin.site.site_header = 'BYTEME Admin'
admin.site.site_title = 'BYTEME Admin'
# Restart 28. 특정 문자열로 끝나는 가장 긴 부분 문자열 찾기
'''
Not solve this problem by myself.
So see this again
'''
def solution(myString, pat):
    """Longest prefix of myString ending with pat (pat is assumed to occur)."""
    # rindex finds the start of the last occurrence; keep everything through it.
    cut = myString.rindex(pat) + len(pat)
    return myString[:cut]
# Quick manual checks for the solutions above.
myString_1 = "AbCdEFG"
myString_2 = "AAAAaaaa"
pat_1 = "dE"
pat_2 = "a"
print(solution(myString_1, pat_1))
print(solution(myString_2, pat_2))
def solution_other(myString, pat):
    """Same contract as solution(): cut after the last occurrence of pat."""
    reversed_str = myString[::-1]
    start = reversed_str.index(pat[::-1])
    return reversed_str[start:][::-1]
# print(solution_other(myString_1, pat_1))
# print(solution_other(myString_2, pat_2))
# PEP 8 (E731): use def instead of assigning a lambda to a name; same behavior.
def solution_best(x, y):
    """Prefix of x through the end of the last occurrence of y."""
    return x[:x.rindex(y) + len(y)]
# print(solution_best(myString_1, pat_1))
# print(solution_best(myString_2, pat_2)) |
from bs4 import BeautifulSoup
from urllib.request import urlopen
import re
# Global set of /wiki/ hrefs that have already been encountered.
pages = set()
def get_links(page_url):
    """Recursively crawl English Wikipedia, printing per-page data.

    Internal links (hrefs starting with /wiki/) are recorded in the
    module-level 'pages' set so each article is visited only once.
    """
    global pages
    html = urlopen('http://en.wikipedia.org' + page_url)
    bs_obj = BeautifulSoup(html.read(), 'html.parser')
    try:
        print(bs_obj.h1.get_text())
        print(bs_obj.find(id='mw-content-text').findAll('p')[0])
        print(bs_obj.find(id='ca-edit').find('span').find('a').attrs['href'])
    except AttributeError:
        print('This page is missing something! No worries though!')
    for link in bs_obj.findAll('a', href=re.compile('^(/wiki/)')):
        if 'href' in link.attrs:
            if link.attrs['href'] not in pages:
                # we have encountered a new page here
                new_page = link.attrs['href']
                print(new_page)
                # Fix: store the bare URL. The set previously stored
                # '----------------\n' + new_page, so the membership test above
                # never matched and pages were re-crawled endlessly.
                pages.add(new_page)
                get_links(new_page)
# Start crawling from the Wikipedia front page.
get_links("")
|
#! /usr/bin/env python
# -*- coding: utf-8 -*-
"""
Module that contains consts definitions used by libraries
"""
from __future__ import print_function, division, absolute_import
from Qt.QtCore import Signal, QObject
from Qt.QtGui import QColor
# class LibraryItemSignals(QObject, object):
# """
# Class that contains definition for LibraryItem signals
# """
#
# saved = Signal(object)
# saving = Signal(object)
# loaded = Signal(object)
# copied = Signal(object, object, object)
# deleted = Signal(object)
# renamed = Signal(object, object, object)
LIBRARY_DEFAULT_NAME = 'DefaultLibrary'
# DEFAULT_ICON_MODE = 'icon'
# DEFAULT_TABLE_MODE = 'table'
# --- DPI scaling limits ---
DPI_ENABLED = True
DPI_MIN_VALUE = 80
DPI_MAX_VALUE = 250
# --- Custom item-model role names ---
ITEM_DEFAULT_SORT_ROLE = 'SortRole'
ITEM_DEFAULT_DATA_ROLE = 'DataRole'
# ITEM_DEFAULT_MAX_ICON_SIZE = 256
# ITEM_DEFAULT_FONT_SIZE = 13
# ITEM_DEFAULT_PLAYHEAD_COLOR = QColor(255, 255, 255, 220)
# ITEM_DEFAULT_THUMBNAIL_COLUMN = 0
# ITEM_DEFAULT_ENABLE_THUMBNAIL_THREAD = True
# --- Item behaviour defaults ---
ITEM_DEFAULT_ENABLE_DELETE = False
ITEM_DEFAULT_ENABLE_NESTED_ITEMS = False
ITEM_DEFAULT_EXTENSION = ''
ITEM_DEFAULT_MENU_NAME = ''
# ITEM_DEFAULT_MENU_ORDER = 10
# ITEM_DEFAULT_MENU_ICON = ''
GROUP_ITEM_DEFAULT_FONT_SIZE = 24
# TREE_MINIMUM_WIDTH = 5
# TREE_DEFAULT_WIDTH = 100
# LIST_DEFAULT_DRAG_THRESHOLD = 10
# VIEWER_DEFAULT_PADDING = 5
# VIEWER_DEFAULT_ZOOM_AMOUNT = 90
# VIEWER_DEFAULT_TEXT_HEIGHT = 20
# VIEWER_DEFAULT_WHEEL_SCROLL_STEP = 2
# VIEWER_DEFAULT_MIN_SPACING = 0
# VIEWER_DEFAULT_MAX_SPACING = 50
# VIEWER_DEFAULT_MIN_LIST_SIZE = 15
# VIEWER_DEFAULT_MIN_ICON_SIZE = 50
# VIEWER_DEFAULT_TEXT_COLOR = QColor(255, 255, 255, 200)
# VIEWER_DEFAULT_SELECTED_TEXT_COLOR = QColor(255, 255, 255, 200)
# VIEWER_DEFAULT_BACKGROUND_COLOR = QColor(255, 255, 255, 30)
# VIEWER_DEFAULT_BACKGROUND_HOVER_COLOR = QColor(255, 255, 255, 35)
# VIEWER_DEFAULT_BACKGROUND_SELECTED_COLOR = QColor(30, 150, 255)
# --- Icon colors and feature toggles ---
ICON_COLOR = QColor(255, 255, 255)
ICON_BADGE_COLOR = QColor(230, 230, 0)
# PROGRESS_BAR_VISIBLE = True
SETTINGS_DIALOG_ENABLED = False
RECURSIVE_SEARCH_ENABLED = False
# TRASH_NAME = 'trash'
# TRASH_ENABLED = True
# DEFAULT_RECURSIVE_DEPTH = 8
# DEFAULT_RECURSIVE_SEARCH_ENABLED = False
# Default persisted UI settings (sorting/grouping, layout geometry,
# widget visibility, items-widget appearance, search/filter state, theme).
DEFAULT_SETTINGS = {
    "library": {
        "sortBy": ["name:asc"],
        "groupBy": ["category:asc"]
    },
    "paneSizes": [160, 280, 180],
    "geometry": [-1, -1, 860, 720],
    "trashFolderVisible": False,
    "sidebarWidgetVisible": True,
    "previewWidgetVisible": True,
    "menuBarWidgetVisible": True,
    "statusBarWidgetVisible": True,
    "recursiveSearchEnabled": True,
    "itemsWidget": {
        "spacing": 2,
        "padding": 6,
        "zoomAmount": 80,
        "textVisible": True,
    },
    "searchWidget": {
        "text": "",
    },
    "filterByMenu": {
        "Folder": False
    },
    "theme": {
        "accentColor": "rgb(0, 175, 240, 255)",
        "backgroundColor": "rgb(60, 64, 79, 255)",
    }
}
|
#!/usr/bin/env python3
""" Run an adversarial attack """
import os
import json
import scipy
import joblib
import argparse
import numpy as np
from os.path import join
from keras import backend as K
from inception_v3 import InceptionV3
from keras.preprocessing import image
from keras.utils.data_utils import get_file
from cleverhans.utils_keras import KerasModelWrapper
from cleverhans.attacks import MomentumIterativeMethod
from keras.applications.imagenet_utils import decode_predictions
# Keras-hosted JSON mapping ImageNet class index -> (wnid, class name).
CLASS_INDEX_PATH = 'https://s3.amazonaws.com/deep-learning-models/' + \
    'image-models/imagenet_class_index.json'
# Momentum-iterative attack hyper-parameters; the clip range matches the
# [-1, 1] scaling produced by preprocess_input().
ATTACK_PARAMS = {
    'eps': 0.3,
    'nb_iter': 20,
    'clip_min': -1.,
    'clip_max': 1.
}
def get_imagenet_index():
    """Load the ImageNet class map as (index -> name, name -> index) dicts."""
    fpath = get_file('imagenet_class_index.json',
                     CLASS_INDEX_PATH,
                     cache_subdir='models',
                     file_hash='c2c37ea517e94d9795004a39431a14cb')
    with open(fpath) as f:
        data = json.load(f)
    # Each JSON entry is "index": [wnid, name]; build both directions at once.
    int_to_str = {int(idx): entry[1] for idx, entry in data.items()}
    str_to_int = {entry[1]: int(idx) for idx, entry in data.items()}
    return int_to_str, str_to_int
def preprocess_input(x):
    """ Model weights were trained expecting this preprocessing """
    # Map pixel values from [0, 255] to [-1, 1].
    scaled = x / 127.5
    return scaled - 1.
def postprocess_input(x):
    """ Undo the preprocessing in preprocess_input to get an image back """
    # Map values from [-1, 1] back to [0, 255].
    shifted = x + 1.
    return shifted * 127.5
def save_adv(attack, source, dest, target):
    """ Load an image and save an adversarial image """
    # Load, resize to the 299x299 InceptionV3 input, and add a batch axis.
    img = image.load_img(source, target_size=(299, 299))
    x = image.img_to_array(img)
    x = np.expand_dims(x, axis=0)
    x = preprocess_input(x)
    # One-hot target label over 1000 ImageNet classes.
    y_target = np.zeros([ 1, 1000 ])
    y_target[0, target] = 1
    adv_x = attack.generate_np(x, y_target=y_target, **ATTACK_PARAMS)
    img = postprocess_input(adv_x[0])
    # NOTE(review): scipy.misc.imsave was removed in SciPy >= 1.2 — confirm the
    # pinned SciPy version or migrate to imageio.imwrite.
    scipy.misc.imsave(dest, img)
# CLI: positional source-images dir and map file; optional output dir and
# an integer list restricting which class indices to attack.
parser = argparse.ArgumentParser()
parser.add_argument('--output-images', default='./output')
parser.add_argument('--classes', nargs='+', default=[], type=int)
parser.add_argument('source_images', metavar='source-images')
parser.add_argument('map_file', metavar='map-file')
if __name__ == '__main__':
    args = parse_args() if False else parser.parse_args()  # noqa: simple CLI parse
    model = InceptionV3(include_top=True, weights='imagenet')
    _, imagenet_map = get_imagenet_index()
    # map_data appears to be ([source_class, ...], [[(target_class, _), ...], ...])
    # indexed in parallel — TODO confirm against the producer of map_file.
    map_data = joblib.load(args.map_file)
    sess = K.get_session()
    wrap = KerasModelWrapper(model)
    attack = MomentumIterativeMethod(wrap, sess=sess)
    classes = list(range(len(map_data[0])))
    # No --classes given -> attack every class.
    if args.classes == []: args.classes = classes
    assert len(map_data[0]) == len(map_data[1])
    for i in classes:
        source_class = map_data[0][i]
        target_classes = map_data[1][i]
        if i not in args.classes: continue
        for target_class, _ in target_classes:
            target_class_i = imagenet_map[target_class]
            src_dir_path = join(args.source_images, source_class)
            dst_dir_path = join(args.output_images, source_class, target_class)
            image_filenames = [ fname for fname in os.listdir(src_dir_path) ]
            for filename in image_filenames:
                src_path = join(src_dir_path, filename)
                dst_path = join(dst_dir_path, filename)
                os.makedirs(dst_dir_path, exist_ok=True)
                save_adv(attack, src_path, dst_path, target_class_i)
                print(src_path, '->', dst_path)
|
from itertools import combinations
import sys
def get_score(arr, team):
    """Sum the pairwise abilities arr[a][b] + arr[b][a] over all pairs in team."""
    return sum(arr[a][b] + arr[b][a] for a, b in combinations(team, 2))
# Read n players and the n x n ability matrix, then find the split into two
# half-sized teams that minimizes the score difference (BOJ-style input).
n = int(sys.stdin.readline().strip())
arr = [list(map(int, sys.stdin.readline().strip().split())) for _ in range(n)]
everyone = set(range(n))
best_score = float('inf')
for team_a in combinations(range(n), n // 2):
    # Team B is simply everyone not on team A.
    team_b = sorted(everyone - set(team_a))
    diff = abs(get_score(arr, team_a) - get_score(arr, team_b))
    best_score = min(best_score, diff)
print(best_score)
from __future__ import print_function
from neural_network import NeuralNetwork
from nn_img2num import NnImg2Num
from my_img2num import MyImg2Num
import numpy
import random
import torch
import torchvision
from torch.autograd import Variable
def test():
    """Smoke-test NnImg2Num and MyImg2Num on one MNIST batch."""
    # Prepare sample input for forward().
    # NOTE(review): torchvision expects 'transform' to be a callable, not True —
    # confirm this works with the pinned torchvision version.
    train_loader = torchvision.datasets.MNIST('../data_MNIST', train=True, download=True, transform=True, target_transform=True)
    batch_size = 100
    # Fix: '/' yields a float in Python 3 and view() requires ints; use
    # integer division for the batch count. (Unused locals epoch,
    # learning_rate and numCls removed.)
    iteration = len(train_loader) // batch_size
    input = (torch.squeeze(train_loader.train_data.view(iteration, batch_size, 1, 784), 2)).type(torch.FloatTensor)
    # Test NnImg2Num
    net = NnImg2Num()
    net.train()
    print('forward output', net.forward(input[0]))
    print('call output', net(input[0]))
    # Test MyImg2Num
    netMy = MyImg2Num()
    netMy.train()
    print('forward output', netMy.forward(input[0]))
    print('call output', netMy(input[0]))
# Run the smoke test when this module is executed.
test()
|
"""Store the data in a SQLite database.
Usage:
$ python make_db.py
# Drop DB and verbose output
$ python make_db.py -d -v
"""
import os
import time
import logging
import argparse
import pandas as pd
import sqlalchemy as sa
from argparse import RawDescriptionHelpFormatter
# Module logger stays at DEBUG; the stream handler's level is chosen in main()
# based on --verbose.
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
formatter_str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
formatter = logging.Formatter(formatter_str)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
# Paths resolved relative to this file: <root>/data for input CSVs and
# <root>/Pokemon.db for the SQLite database.
HERE = os.path.abspath(os.path.dirname(__file__))
ROOT = os.path.abspath(os.path.join(HERE, ".."))
DATA_DIR = os.path.abspath(os.path.join(ROOT, "data"))
DB_NAME = "Pokemon.db"
DB_PATH = os.path.abspath(os.path.join(ROOT, DB_NAME))
TABLE_NAME = "pokemons"
# TODO: abilities table
def parse_args():
    """Build and evaluate the CLI: -d/--debug and -v/--verbose flags."""
    cli = argparse.ArgumentParser(
        description=__doc__, formatter_class=RawDescriptionHelpFormatter
    )
    cli.add_argument(
        "-d",
        "--debug",
        action="store_true",
        help="If set, drop the database at startup",
    )
    cli.add_argument(
        "-v", "--verbose", action="store_true", help="If set, increase output verbosity"
    )
    return cli.parse_args()
def main():
    """Load data/pokemon.csv and append it to the 'pokemons' SQLite table."""
    t0 = time.time()
    args = parse_args()
    # Verbosity changes only the handler threshold; the logger stays at DEBUG.
    if args.verbose:
        sh.setLevel(logging.DEBUG)
    else:
        sh.setLevel(logging.INFO)
    logger.addHandler(sh)
    # --debug: start from a fresh database file.
    if args.debug:
        try:
            os.unlink(DB_PATH)
        except FileNotFoundError:
            pass
    file_path = os.path.abspath(os.path.join(DATA_DIR, "pokemon.csv"))
    df = pd.read_csv(file_path)
    logger.debug(f"DataFrame: {df.shape}")
    logger.debug(df.columns)
    engine = sa.create_engine(f"sqlite:///{DB_PATH}")
    # NOTE(review): if_exists="append" duplicates rows when re-run without
    # --debug — confirm that is intended.
    df.to_sql(TABLE_NAME, con=engine, if_exists="append", index=False)
    t1 = time.time()
    logger.info(f"Done in: {(t1 - t0):.2f} seconds")
# Script entry point.
if __name__ == "__main__":
    main()
|
# -*- coding: utf-8 -*-
from binance.client import Client
from binance.enums import *
from binance.websockets import BinanceSocketManager
import time
from datetime import datetime
import prettytable as pt
from settings import MarginAccount
from settings import BinanceKey1
# API credentials and client (long timeout for slow connections).
api_key = BinanceKey1['api_key']
api_secret = BinanceKey1['api_secret']
client = Client(api_key, api_secret, {"verify": True, "timeout": 10000})
# Configuration parameters
pair_symbol = MarginAccount['pair_symbol']
coin_symbol = MarginAccount['coin_symbol']
usdt_symbol = MarginAccount['usdt_symbol']
bnb_symbol = MarginAccount['bnb_symbol']
max_margins = 15
# Invested principal, tallied by hand
base_balance = MarginAccount['base_balance']
fiat_symbol = MarginAccount['fiat_symbol']
fiat_price = MarginAccount['fiat_price']
def run():
    """Print a separator line, then dump the open margin orders."""
    print('=' * 30)
    get_all_margin_orders()
def get_all_margin_orders():
    """Fetch open margin orders for the configured pair and print them as a table."""
    orders = client.get_open_margin_orders(symbol=pair_symbol)
    table = pt.PrettyTable()
    table.field_names = ["orderId", "Qty", "Price", "Side", "Symbol", "Time"]
    for order in orders:
        table.add_row([order["orderId"], order["origQty"], order["price"],
                       order["side"], order["symbol"], timestamp2string(order["time"])])
    print(table)
def timestamp2string(timeStamp):
try:
d = datetime.fromtimestamp(int(timeStamp)/1000)
dtstr = d.strftime("%Y-%m-%d %H:%M:%S")
return dtstr
except Exception as e:
print(e)
return ''
# Script entry point: print the open margin orders once.
if __name__ == "__main__":
    run()
import random
def ia(boxes):
    """Pick 'G' (left) or 'D' (right) uniformly at random; *boxes* is unused."""
    return 'G' if random.randint(0, 1) else 'D'
|
from bs4 import BeautifulSoup
import requests
from win32com.client import Dispatch
import win32com.client as wincl
import datetime
import time
# def speak(text): # male voice
# speak = Dispatch("SAPI.SpVoice")
# speak.Speak(text)
def speak(text): # female voice
    """Speak *text* aloud via SAPI using voice #1 (Windows Text-to-Speech list).

    Cleanup: removed the unused SVSFlag constant and the no-op 'spk.Voice'
    expression statement from the original.
    """
    speaker_number = 1
    spk = wincl.Dispatch("SAPI.SpVoice")
    vcs = spk.GetVoices()
    spk.SetVoice(vcs.Item(speaker_number)) # set voice (see Windows Text-to-Speech settings)
    spk.Speak(text)
def wishme():
    """Speak and print a time-of-day greeting, then introduce the news reader."""
    hour = int(datetime.datetime.now().hour)
    if 0 <= hour < 12:
        greeting = "Good Morning"
    elif 12 <= hour < 18:
        greeting = "Good Afternoon"
    else:
        greeting = "Good Evening"
    speak(greeting)
    print(greeting + "....")
    speak("I am Zira from Times of India ... here to tell you today's top twenty news !! so here I Begin....")
def get_fresh_news():
    """Scrape the Times of India briefs page and return up to 20 headline strings."""
    url = 'https://timesofindia.indiatimes.com/briefs'
    html_content = requests.get(url).content
    soup = BeautifulSoup(html_content, 'html.parser')
    boxes = soup.find_all('div', class_='brief_box')
    # Each brief box carries its headline in an <h2>; skip boxes without one.
    headings = [box.find('h2') for box in boxes if box is not None]
    return [h.text for h in headings if h is not None][:20]
if __name__ == "__main__":
    speak('hello')
    # Endless loop: greet, read the top-20 headlines aloud, then idle ~100 s.
    while True:
        wishme()
        news = get_fresh_news()
        for i in news:
            speak(i)
            time.sleep(2)
        speak('This was todays news Thankyou for Listening !!!')
        time.sleep(100)
|
from bs4 import BeautifulSoup
import requests
import re
# Scrape the neighborhood table from Wikipedia (Python 2 script).
response = requests.get('https://en.wikipedia.org/wiki/Neighborhoods_in_New_York_City')
soup = BeautifulSoup(response.text, 'html.parser')
table = soup.table.find_all('td')
# Walk every 5th <td> starting at index 4 (one column of the table).
count = 4
n = []
while count < len(table):
    content = str(table[count])
    # NOTE(review): str.strip() removes any of the given *characters*
    # ('<', 't', 'd', '>', '/'), not the literal substring — this happens to
    # trim the tags here but would be safer using the tag's .get_text().
    content = content.strip('<td>')
    content = content.strip('</')
    content = content.split()
    n.append(content)
    count += 5
print n
# for i in range(len(table)):
#     neighborhoods.append( table[count] )
#     count +=5
# print soup.table.find_all('td')
# -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: testKafka
Description :
Author : ZWZ
date: 18-6-15
-------------------------------------------------
Change Activity:
18-6-15:
-------------------------------------------------
"""
import json
from test.localKafkaUrlinformation import localKafkaUrlinformation
from utils.LogUtil import Log
__author__ = 'ZWZ'
if __name__ == '__main__':
    # Produce ten test messages ("0" .. "9") to the local Kafka endpoint.
    producer = localKafkaUrlinformation()
    for message_number in range(10):
        producer.producerUrl(str(message_number))
|
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import numpy as np
# NumPy practice exercises.
mat1 = np.zeros((4, 4), dtype=int)
print(mat1)
# Negate all values which are more than 5 in a row.
arr1 = np.arange(11)
# Fix: the original negated twice — once via the boolean mask and again via
# arr1[6:] = -arr1[6:] — which cancelled out and left every value positive.
arr1[arr1 > 5] = -arr1[arr1 > 5]
mat1 = np.array([['abc', 'A'], ['def', 'B'], ['ghi', 'C'], ['jkl', 'D']])
arr = np.array(['abc', 'dfe', 'ghi', 'kjl'])
# Sort array by second column.
arr = np.array([[1, 21, 3], [5, 4, 2], [56, 12, 4]])
print(arr[arr[:, 1].argsort()])
# Get top 4 values in array.
arr = np.array([90, 14, 24, 13, 13, 590, 0, 45, 16, 50])
print(arr[np.argpartition(arr, -4)[-4:]])
# Find the nearest number from the given number in an array.
arr = np.array([10, 55, 22, 3, 6, 44, 9, 54])
nearest_to = 50
print(arr[np.abs(arr - nearest_to).argmin()])
mat = np.array([[10, 5, 9],
                [2, 20, 6],
                [8, 3, 30]]).reshape(3, 3)
# N1 to the upper half elements of mat, find highest number
# N2 to the main diagonal elements of mat, find highest number
# N3 to the lower half elements of mat, find highest number
|
# Prefix search: a business matches when every search word is a prefix of
# some word in its name.
business_names = [ "burger king", "McDonald's", "super duper burger's", "subway", "pizza hut","pizza fateh"]
searchTerm = "duper bur"
res = []
search_words = searchTerm.split()
for name in business_names:
    name_words = name.split()
    matches = 0
    for word in name_words:
        for term in search_words:
            if word.startswith(term):
                matches += 1
    if matches >= 1 and matches == len(search_words):
        res.append(' '.join(name_words))
print(res)
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.1 on 2018-03-18 23:21
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated migration: make RaceEvent.badge a nullable/blank FK and
    give RaceEvent.description a blank '' default."""

    dependencies = [
        ('terrainapp', '0006_auto_20180318_1413'),
    ]

    operations = [
        migrations.AlterField(
            model_name='raceevent',
            name='badge',
            field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, related_name='raceevents', to='terrainapp.Badge'),
        ),
        migrations.AlterField(
            model_name='raceevent',
            name='description',
            field=models.CharField(blank=True, default='', max_length=1000),
        ),
    ]
|
import sys
from io import BytesIO
import urllib.request
from bs4 import BeautifulSoup
import telegram
from flask import Flask, request, send_file
from time import sleep;
from fsm import TocMachine
#API_TOKEN = '488582332:AAE7swIh7w7ZM0sRUstZqubH5LKvHjz0Is0'
# NOTE(review): the bot token is hard-coded in source; move it to an
# environment variable or config file before publishing this repository.
API_TOKEN='510481981:AAEZsN2FIDJLE7DCaM69BWDuzxguM5A-h_Q'
# Sequence id of the most recently processed Telegram update (set in main()).
lastMessageId = 0;
bot = telegram.Bot(token=API_TOKEN);
# Ordering-dialogue finite-state machine:
# hasperson -> menu | news (both self-looping) | order -> phone -> finish,
# with 'goback' returning to hasperson from any state.
machine = TocMachine(
    states=[
        'hasperson',
        'menu',
        'order',
        'phone',
        'news',
        'finish'
    ],
    transitions=[
        {
            'trigger': 'gomenu',
            'source':'hasperson',
            'dest': 'menu'
        },
        {
            'trigger': 'goorder',
            'source': 'hasperson',
            'dest': 'order'
        },
        {
            'trigger':'gonews',
            'source':'hasperson',
            'dest':'news'
        },
        {
            'trigger':'gophone',
            'source':'order',
            'dest':'phone'
        },
        {
            'trigger': 'goback',
            'source': [
                'hasperson',
                'menu',
                'order',
                'phone',
                'news',
                'finish'
            ],
            'dest': 'hasperson'
        },
        {
            'trigger':'gofinish',
            'source':'phone',
            'dest':'finish',
        },
        {
            'trigger':'staymenu',
            'source':'menu',
            'dest':'menu'
        },
        {
            'trigger':'staynews',
            'source':'news',
            'dest':'news'
        }
    ],
    initial='hasperson',
    auto_transitions=False,
    show_conditions=True,
)
'''
def _set_webhook():
status = bot.set_webhook(WEBHOOK_URL)
if not status:
print('Webhook setup failed')
sys.exit(1)
else:
print('Your webhook URL has been set to "{}"'.format(WEBHOOK_URL))
@app.route('/hook', methods=['POST'])
def webhook_handler():
update = telegram.Update.de_json(request.get_json(force=True), bot)
machine.advance(update)
return 'ok'
@app.route('/show-fsm', methods=['GET'])
def show_fsm():
byte_io = BytesIO()
machine.graph.draw(byte_io, prog='dot', format='png')
byte_io.seek(0)
return send_file(byte_io, attachment_filename='fsm.png', mimetype='image/png')
'''
def getText(Update):
    """Return the text of the update's message."""
    message = Update["message"]
    return message["text"]
def getMessageId(Update):
    """Return the update's sequence id ('update_id')."""
    return Update["update_id"]
def getChatId(Update):
    """Return the chat id of the update's message."""
    chat = Update["message"]["chat"]
    return chat["id"]
def getUserId(Update):
    """Return the sender's user id from the update's message."""
    sender = Update["message"]["from_user"]
    return sender["id"]
def messageHandler(Update):
    """Advance the ordering FSM for one Telegram update and reply accordingly.

    Side effects: updates the global lastMessageId, triggers machine
    transitions, and sends messages/photos via the bot.
    """
    global lastMessageId;
    text = getText(Update);
    msg_id = getMessageId(Update);
    user_id = getUserId(Update);
    lastMessageId = msg_id;
    # First step: choose the state transition from the incoming text.
    if text =='/start':
        machine.trigger('goback');
    elif text =='order' and machine.state=='hasperson':
        #bot.sendMessage(user_id, '請問您要叫幾隻雞?');
        machine.trigger('goorder');
    elif text =='news' and machine.state=='hasperson':
        #page=urllib.request.urlopen('http://home.so-net.net.tw/ywc580510/sale.html')
        #soup=BeautifulSoup(page.read(),"html.parser")
        #tmp=soup.find_all(width="168")
        #inf=tmp[1];
        #bot.sendMessage(user_id,inf.text)
        machine.trigger('gonews');
    elif text=='menu' and machine.state=='hasperson':
        #bot.sendPhoto(user_id,'http://home.so-net.net.tw/ywc580510/images/dmall.jpg');
        machine.trigger('gomenu');
    elif text!='/start' and machine.state=='menu':
        machine.trigger('staymenu');
    elif text!='/start' and machine.state=='news':
        machine.trigger('staynews');
    elif text!='/start' and machine.state=='order':
        machine.trigger('gophone');
    elif text!='/start' and machine.state=='phone':
        machine.trigger('gofinish');
    print("Message From User:");
    print(text);
    print("State:");
    print(machine.state);
    # Second step: reply based on the state the machine landed in.
    if machine.state=='hasperson':
        bot.sendMessage(user_id, '蛋蛋漢堡您好 點餐請輸入 order 觀看菜單請輸入 menu 欲知店家消息請輸入 news 重來請輸入 /start');
    elif machine.state=='menu':
        bot.sendPhoto(user_id,'http://home.so-net.net.tw/ywc580510/images/dmall.jpg');
    elif machine.state=='order':
        bot.sendMessage(user_id,'請輸入欲點的餐點:');
    elif machine.state=='news':
        # Scrape the current promotion text from the shop homepage.
        page=urllib.request.urlopen('http://home.so-net.net.tw/ywc580510/sale.html')
        soup=BeautifulSoup(page.read(),"html.parser")
        tmp=soup.find_all(width="168")
        inf=tmp[1];
        bot.sendMessage(user_id,inf.text)
    elif machine.state=='phone':
        bot.sendMessage(user_id,'請輸入您的手機:');
    elif machine.state=='finish':
        # Order complete: thank the customer and reset to the initial state.
        bot.sendMessage(user_id,'完成訂購 歡迎下次光臨');
        machine.trigger('goback');
        bot.sendMessage(user_id, '蛋蛋漢堡您好 點餐請輸入 order 觀看菜單請輸入 menu 欲知店家消息請輸入 news 重來請輸入 /start');
    print("State:");
    print(machine.state);
    return;
def main():
    """Poll Telegram for updates and dispatch each new one to messageHandler()."""
    global lastMessageId;
    # Skip any backlog: start from the newest update already queued.
    Updates = bot.getUpdates();
    if(len(Updates) > 0):
        lastMessageId = Updates[-1]["update_id"];
    while(True):
        # NOTE(review): offset=lastMessageId re-fetches the last processed
        # update; the '>' filter below compensates — confirm against the
        # Telegram getUpdates offset semantics (typically last_id + 1).
        Updates = bot.getUpdates(offset=lastMessageId);
        Updates = [Update for Update in Updates if Update["update_id"] > lastMessageId]
        for Update in Updates:
            messageHandler(Update);
        sleep(0.5);
|
__author__ = 'apavlenko'
import random
from model.contact import Contact
def test_modify_contact_db(app, db, check_ui):
    """Modify a random contact and verify the DB (and optionally the UI) agrees."""
    old_contacts = db.get_contact_list()
    # Ensure there is at least one contact to modify.
    if len(old_contacts) == 0:
        app.contact.add_new_wo_group(Contact(firstname=app.firstname, middlename=app.middlename, lastname=app.lastname))
        old_contacts = db.get_contact_list()
    contact = random.choice(old_contacts)
    new_contact_data = Contact(id=contact.id, firstname="Modifyed firstname", middlename="Modifyed middlename",
                               lastname="Modifyed lastname")
    app.contact.modify_contact_by_id(contact.id, new_contact_data)
    app.contact.count()
    # Fix (resolves the old TODO): mirror the modification in the expected
    # list. The original wrote the unmodified contact back, so the assertion
    # below could never detect a failed edit.
    old_contacts[old_contacts.index(contact)] = new_contact_data
    new_contacts = db.get_contact_list()
    assert sorted(old_contacts, key=Contact.id_or_max) == sorted(new_contacts, key=Contact.id_or_max)
    if check_ui:
        assert sorted(new_contacts, key=Contact.id_or_max) == sorted(app.contact.get_contact_list_from_home_page(),
                                                                     key=Contact.id_or_max)
def test_modify_contact_by_index(app):
    """Modify a contact picked by random index and verify the home-page list."""
    contacts_before = app.contact.get_contact_list_from_home_page()
    # Ensure there is at least one contact to modify.
    if not contacts_before:
        app.contact.add_new_wo_group(Contact(firstname=app.firstname, middlename=app.middlename, lastname=app.lastname))
        contacts_before = app.contact.get_contact_list_from_home_page()
    index = random.randrange(len(contacts_before))
    replacement = Contact(firstname="New Contact", middlename="New Contact", lastname="New Contact")
    replacement.id = contacts_before[index].id
    app.contact.modify_contact_by_index(index, replacement)
    contacts_after = app.contact.get_contact_list_from_home_page()
    assert len(contacts_before) == len(contacts_after)
    contacts_before[index] = replacement
    assert sorted(contacts_before, key=Contact.id_or_max) == sorted(contacts_after, key=Contact.id_or_max)
|
#!/usr/bin/env python3
########################################################
### Grading Script for CSE 231 ###
### Built by Cody Littley ###
### First used January 2014 ###
########################################################
import sys
import os
import time
# --- Grader configuration ---
ROOT_HANDIN_DIRECTORY = "/user/cse231/Handin/" #this should point to the folder containing all of the handin data, filepath should end in a "/" character
SECTION_IDENTIFIER = "Section" #the prefix for a section folder
SCORE_SHEET_FILE_STYLE = "*.score" #this is the pattern for the file name used to open the score sheet. Assumes "*" char is student netID
FILES_TO_OPEN = ["*.output","proj*.py"] #these files will automatically be opened when grading each directory (other than the score sheet)
EDITOR = "gedit"
# The score sheet is opened alongside the other per-student files.
FILES_TO_OPEN.append(SCORE_SHEET_FILE_STYLE)
def process_args():
    '''Grab command line arguments'''
    # Recognized flags; any other token is treated as a value for the most
    # recently seen flag (tokens before the first flag are dropped).
    flags_to_catch = ["-h","--help","-s","--section","-p","--project","-n","--netid","-k","--skip","-f","--file","-r","--prompt","__run_a_prompt_shell__"]
    arg_dict = {}
    current_flag = ""
    for token in sys.argv:
        if token in flags_to_catch:
            arg_dict.setdefault(token, [])
            current_flag = token
        elif current_flag != "":
            arg_dict[current_flag].append(token)
    return arg_dict
def validate_sections(sections, students):
    '''Prompt the user for sections (if not specified in arguments) and check validity of sections

    A section is valid when a directory named SECTION_IDENTIFIER + number
    exists under ROOT_HANDIN_DIRECTORY.  Returns the de-duplicated list of
    valid integer section numbers; halts when none are valid and no
    individual students were named.'''
    # Ask interactively only when neither sections nor students were supplied.
    if not sections and not students:
        sections = input("Which section(s) would you like to grade?: ").split(",")
    for index, entry in enumerate(sections):
        sections[index] = entry.strip()
    #It is assumed that all section folders will be named with the following format: "SECTION_IDENTIFIERxxx" where xxx is an integer section number
    #This script will allow users to simply type the number of their section instead of the full name of the folder
    known_numbers = []
    for entry in os.listdir(ROOT_HANDIN_DIRECTORY):
        if entry.find(SECTION_IDENTIFIER) == 0:
            try:
                known_numbers.append(int(entry[len(SECTION_IDENTIFIER):]))
            except Exception:
                pass
    accepted = []
    for raw in sections:
        try:
            raw = int(raw)
            if raw not in known_numbers:
                raise Exception("invalid section")
            accepted.append(raw)
        except Exception:
            if(raw != ""):
                print("\nSection",raw,"is invalid.\n")
    if accepted == [] and students == []:
        print("\nNo valid sections detected. Program will now halt.\n")
        exit()
    #remove duplicates
    return list(set(accepted))
def validate_projects(projects,sections,student_list):
    '''Prompt the user for projects (if not specified in arguments) and check validity

    projects     -- project names from the command line (may be empty; prompts when empty)
    sections     -- validated integer section numbers whose students must all have the project
    student_list -- individually named students who must each have the project
    Returns the surviving project names; halts the program when none survive.'''
    if projects == []:
        projects = input("Which project(s) would you like to grade?: ").split(",")
        for index,section in enumerate(projects):
            projects[index] = section.strip()
    #In order for a project to be valid, it must be contained in each of the student directories that is being graded (even if the directory is empty)
    #A project name does not need to be an integer
    invalid_projects = []
    dirs_at_root = os.listdir(ROOT_HANDIN_DIRECTORY)
    # Pass 1: every student in every selected section must have the project folder.
    for directory in dirs_at_root:
        try:
            if directory.find(SECTION_IDENTIFIER) == 0 and int(directory[len(SECTION_IDENTIFIER):]) in sections:
                students_in_section = os.listdir(ROOT_HANDIN_DIRECTORY + directory)
                for student in students_in_section:
                    dirs_in_student_folder = os.listdir(ROOT_HANDIN_DIRECTORY + directory + "/" + student)
                    for index,student_directory in enumerate(dirs_in_student_folder):
                        try: #if it can be converted to an integer then do it (so that 001 is the same as 01 and 1)
                            dirs_in_student_folder[index] = int(student_directory)
                        except Exception:
                            pass
                    for project in projects:
                        if int(project) not in dirs_in_student_folder:
                            invalid_projects.append(str(project))
        except Exception:
            pass
    # Pass 2: the same existence check for students that were named individually.
    for student in student_list:
        students_section = ""  # NOTE(review): assigned but never read; the code below relies on `directory` left over from the break
        for directory in dirs_at_root:
            if directory.find(SECTION_IDENTIFIER) == 0:
                students_in_section = os.listdir(ROOT_HANDIN_DIRECTORY + directory)
                if student in students_in_section:
                    students_section = directory
                    break
        dirs_in_student_folder = os.listdir(ROOT_HANDIN_DIRECTORY + directory + "/" + student)
        good_projects = []
        for index,student_directory in enumerate(dirs_in_student_folder):
            try: #if it can be converted to an integer then do it (so that 001 is the same as 01 and 1)
                dirs_in_student_folder[index] = int(student_directory)
            except Exception:
                pass
        for project in projects:
            try:
                if int(project) in dirs_in_student_folder:
                    good_projects.append(str(project))
            except Exception:
                # Non-numeric project names are compared as plain strings.
                if project in dirs_in_student_folder:
                    good_projects.append(project)
        for P in projects:
            if P not in good_projects:
                invalid_projects.append(P)
    valid_projects = []
    for project in projects:
        if project not in invalid_projects:
            valid_projects.append(project)
        else:
            print("\nProject " + project + " is invalid.")
    if valid_projects == []:
        print("\nNo valid projects detected. Program will now halt.\n")
        exit()
    return valid_projects
def validate_student(student):
    '''Ensure that a student is in the file system. Can handle partial names. Returns multiple students if there are multiple matches

    Scans every section folder for student directories whose name starts
    with *student*; halts the program when nothing matches.'''
    matches = []
    for entry in os.listdir(ROOT_HANDIN_DIRECTORY):
        if entry.find(SECTION_IDENTIFIER) != 0:
            continue
        for candidate in os.listdir(ROOT_HANDIN_DIRECTORY + entry):
            # Prefix match so a partial netID can resolve to one or more students.
            if candidate.find(student) == 0:
                matches.append(candidate)
    if not matches:
        print("\nNo student with netID pattern \""+student+"\" could be found. Program will now halt.\n")
        exit()
    return matches
def construct_full_student_list(sections, students):
    '''Break apart sections into lists of students and merge with the students list

    Returns the merged, de-duplicated, sorted list of student folder names.'''
    for entry in os.listdir(ROOT_HANDIN_DIRECTORY):
        try:
            in_selected_section = (entry.find(SECTION_IDENTIFIER) == 0
                                   and int(entry[len(SECTION_IDENTIFIER):]) in sections)
            if in_selected_section:
                students += os.listdir(ROOT_HANDIN_DIRECTORY + entry)
        except:
            # Non-numeric suffixes (or unreadable dirs) are simply skipped.
            pass
    return sorted(set(students))
def check_for_errors(section,student,project):
    '''Do a sanity check on the scores entered by the grader

    Parses the student's score sheet, checks that the component scores sum to
    the recorded total and that the total is within bounds, then stamps a
    hidden ".graded" marker.  On failure the grader may type "i" to override,
    or re-edit the sheet, after which the function recurses to re-check.'''
    score_sheet = SCORE_SHEET_FILE_STYLE
    if SCORE_SHEET_FILE_STYLE.find("*") != -1:
        score_sheet = score_sheet.replace("*",student)  # substitute the netID into the pattern
    # NOTE(review): grade_file is never closed; consider a "with" block.
    grade_file = open(ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+score_sheet,"r")
    alarms = []
    max_score = None
    given_score = None
    sum_of_parts = 0
    state = 1  # tiny parser state: 1 = seeking the total line, 2 = summing components, -1 = parse failure
    for line in grade_file:
        line = line.strip()
        if state == 1:
            if line == "":
                continue
            else:
                # First non-blank line is expected to contain "Score: __<given>__ ... <max>".
                given_score_pos = line.find("Score: __") + len("Score: __")
                given_score_end = line[given_score_pos:].find("__") + given_score_pos
                try:
                    max_score = int(line[given_score_end+5:])
                    given_score = int(line[given_score_pos:given_score_end])
                    state = 2
                except Exception:
                    alarms.append("Could not parse score!")
                    state = -1
        elif state == 2:
            # Component lines carry their points between double underscores.
            score_start_pos = line.find("__")
            score_end_pos = line[score_start_pos+2:].find("__") + score_start_pos + 2
            try:
                value = int(line[score_start_pos+2:score_end_pos])
                sum_of_parts += value
            except Exception:
                pass
    # NOTE(review): when parsing failed, given_score/max_score stay None and the
    # comparisons below raise TypeError on Python 3 — confirm intended handling.
    if sum_of_parts != given_score:
        alarms.append("Sum of components do not match the given score.")
    if given_score > max_score:
        alarms.append("The given score is greater than the maximum allowable points.")
    if given_score == 0:
        alarms.append("You have given a Zero for this assignment.")
    if alarms == []:
        #make a hidden log file
        #lets program know if file is already graded, can be used to track if a project has been graded
        #look at time stamp to see when it was graded if needed, also track which user completed the grading
        os.system("echo \"$USER\" > "+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/.graded")
        return
    print("\n========================= Score sheet sanity check! =========================\n")
    for alarm in alarms:
        print(alarm+"\n")
    user_input = input("To ignore this warning, type \"i\".\nTo re-examine the score sheet, type anything else: ")
    if user_input == "i":
        #make a hidden log file
        #lets program know if file is already graded, can be used to track if a project has been graded
        #look at time stamp to see when it was graded if needed, also track which user completed the grading
        os.system("echo \"$USER\" > "+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/.graded")
        return
    else:
        score_sheet = SCORE_SHEET_FILE_STYLE
        if SCORE_SHEET_FILE_STYLE.find("*") != -1:
            score_sheet = score_sheet.replace("*",student)
        os.system(EDITOR+" "+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+score_sheet+" &")
        input("\nPress enter to continue\n")
        check_for_errors(section,student,project)  # re-check after the grader edits the sheet
def prompt(section, student, project):
    '''Launch this script's interactive score-entry shell in a new terminal window.'''
    shell_command = " ".join(["python3", "grade.py", "__run_a_prompt_shell__",
                              section, student, project])
    os.system("gnome-terminal -x " + shell_command)
def prompt_shell(args):
    '''This is a special function that is run as a stand alone program in another window. It extends the prompt function.

    args -- [section, student, project] forwarded by prompt() on the command line.
    Walks the score sheet's "__" placeholders, asks the grader for each
    component score, writes the computed total back into the "Score: __" line,
    saves the sheet and re-opens it in EDITOR before exiting.'''
    section = args[0]
    student = args[1]
    project = args[2]
    print("Now grading "+student+"'s solution to project "+project+":\n")
    print("To use a previously entered score, press enter.")
    print("To go \"up\" to re-grade a category, type the letter \"u\" instead of a number.\n")
    score_sheet = SCORE_SHEET_FILE_STYLE
    if SCORE_SHEET_FILE_STYLE.find("*") != -1:
        score_sheet = score_sheet.replace("*",student)
    score_file = open(ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+score_sheet,"r")
    score_file_list = []
    for line in score_file:
        score_file_list.append(line)
    score_file.close()
    primary_score_line = -1  # index of the "Score: __" total line
    lines_with_scores = []   # indices of component lines, plus the terminating "TA Comments" line
    actual_scores = []
    comments = len(score_file_list) -1  # NOTE(review): never used afterwards
    for linum,line in enumerate(score_file_list):
        if line.find("Score: __") != -1:
            primary_score_line = linum
        elif line.strip().find("__") == 0:
            lines_with_scores.append(linum)
        elif line.strip().find("TA Comments") == 0:
            lines_with_scores.append(linum)
            break
        if linum == -1:  # NOTE(review): dead code — linum is never -1; likely meant primary_score_line
            exit()
    for line in lines_with_scores[:-1]:
        actual_scores.append(None)
    score_index = 0
    while(score_index < len(lines_with_scores)):
        if score_index == len(lines_with_scores)- 1:
            # All components entered: write the summed total into the "Score: __" line.
            theSum = 0
            for s in actual_scores:
                theSum += s
            if int(theSum) == theSum:
                theSum = int(theSum)  # drop the trailing ".0" for whole-number totals
            begin = score_file_list[primary_score_line].find("Score: __")
            end = score_file_list[primary_score_line].find("__",begin+len("Score: __"))
            score_file_list[primary_score_line] = score_file_list[primary_score_line][:begin+len("Score: __")] + str(theSum) + score_file_list[primary_score_line][end:]
            break
        print(score_file_list[lines_with_scores[score_index]])
        # Echo the description lines sitting between this component and the next.
        for description in range(lines_with_scores[score_index]+1,lines_with_scores[score_index+1]):
            if score_file_list[description].strip() != "" and score_file_list[description].strip() != "\n":
                print(score_file_list[description].replace("\n",""))
        print()
        grade_value = input("Enter new grade: ")
        print()
        begin = score_file_list[lines_with_scores[score_index]].find("__")
        end = score_file_list[lines_with_scores[score_index]].find("__",begin+2)
        if grade_value == "":
            # Empty input keeps whatever score is already on the sheet.
            try:
                actual_scores[score_index] = float(score_file_list[lines_with_scores[score_index]][begin+2:end])
            except Exception:
                actual_scores[score_index] = 0
        elif grade_value == "u":
            score_index -= 1  # step back to re-grade the previous category
            continue
        else:
            try:
                actual_scores[score_index] = (float(grade_value))
            except Exception:
                print("Error: invalid input\n")
                continue
            score_file_list[lines_with_scores[score_index]] = score_file_list[lines_with_scores[score_index]][:begin+2] + grade_value + score_file_list[lines_with_scores[score_index]][end:]
        score_index += 1
    # Persist the edited sheet, then show it to the grader one last time.
    score_file = open(ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+score_sheet,"w")
    for line in score_file_list:
        score_file.write(line)
    score_file.close()
    score_sheet = SCORE_SHEET_FILE_STYLE
    if SCORE_SHEET_FILE_STYLE.find("*") != -1:
        score_sheet = score_sheet.replace("*",student)
    os.system(EDITOR+" "+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+score_sheet+" &")
    print("<<< This window will automatically close >>>")
    time.sleep(5) #to ensure that the grade sheet has enough time to be opened
    exit()
def grade(students,projects,mode_regrade,mode_prompt):
    '''Step through the projects and students and allow them to be graded

    students     -- student (netID) folder names to grade
    projects     -- validated project names
    mode_regrade -- when False, directories already stamped ".graded" are skipped silently
    mode_prompt  -- when True, also open the interactive score-entry shell in a new terminal'''
    for project in projects:
        for student in students:
            os.system("clear")
            # Locate the section directory containing this student.
            dirs_at_root = os.listdir(ROOT_HANDIN_DIRECTORY)
            section = ""
            for directory in dirs_at_root:
                if directory.find(SECTION_IDENTIFIER) != 0:
                    continue
                try:
                    students_in_section = os.listdir(ROOT_HANDIN_DIRECTORY+directory)
                except Exception:
                    continue
                if student in students_in_section:
                    section = directory
            #ensure that the proper project name is used, project 1 might actually be listed as project 01 or 001
            project = str(project)
            available_projects = os.listdir(ROOT_HANDIN_DIRECTORY+section+"/"+student)
            zeros = 0
            while project not in available_projects and zeros <= 3:
                zeros += 1
                project = "0"+project
            student_project_files = os.listdir(ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project)
            # Already graded? Ask before re-grading (or skip outright in skip mode).
            if ".graded" in student_project_files:
                if mode_regrade == True:
                    user_response = input("Re-Grade project "+str(project)+" for "+student+"? (y/n): ")
                    if user_response == "n":
                        continue
                elif mode_regrade == False:
                    continue
            else:
                user_response = input("Grade project "+str(project)+" for "+student+"? (y/n): ")
                if user_response == "n":
                    continue
            #actually start the grading
            for file_to_open in FILES_TO_OPEN:
                os.system(EDITOR+" "+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+file_to_open+" &")
            if mode_prompt:
                prompt(section,student,project)
            print()
            for f in os.listdir(ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project):
                if f[0] != ".":
                    print(f)
            print()
            print("To run a program-----------------\"run PROGRAM_NAME [arguments]\"")
            print("To list files--------------------\"ls\"")
            print("To open a file-------------------\"open FILE_NAME\"")
            print("To continue----------------------\"c\"")
            print("To quit--------------------------\"q\"\n")
            # Mini command shell for inspecting and running the student's files.
            while(True):
                user_input = input("--> ")
                if user_input == "q":
                    check_for_errors(section,student,project)
                    exit_message()
                    exit()
                elif user_input == "c":
                    check_for_errors(section,student,project)
                    break
                elif user_input == "ls":
                    print()
                    for f in os.listdir(ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project):
                        if f[0] != ".":
                            print(f)
                    print()
                elif user_input.find("run") == 0:
                    try:
                        # Run the named program interactively (-i) in its own terminal.
                        command = "python3 -i "+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+" ".join(user_input.split()[1:])
                        os.system("gnome-terminal --working-directory="+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+" -x "+command)
                    except Exception as e:
                        print("Could not run program")
                elif user_input.find("open") == 0:
                    command = user_input.split()
                    os.system(EDITOR+" "+ROOT_HANDIN_DIRECTORY+section+"/"+student+"/"+project+"/"+" ".join(command[1:])+" &")
                else:
                    print("\nTo run a program-----------------\"run PROGRAM_NAME [arguments]\"")
                    print("To open a file-------------------\"open FILE_NAME\"")
                    print("To continue----------------------\"c\"")
                    print("To quit--------------------------\"q\"\n")
    exit_message()
def exit_message():
    '''Print the farewell banner reminding the grader to save any open files.'''
    print("\n")
    print(" . * . .")
    print(" * -0-")
    print(" . . * - )-")
    print(" . * o . *")
    print(" o save |")
    print(" your -O-")
    print(" . files! * . -0-")
    print(" * o . ' * . o")
    print(" . . | *")
    print(" * * -O- .")
    print(" . * | ,")
    print(" . o")
    print(" .---.")
    print(" = _/__~0_\_ . * o '")
    print(" = = (_________) .")
    print(" . *")
    print(" * - ) - *")
    print(" . .")
    print()
if __name__ == "__main__":
    # Entry point: parse flags, resolve sections/students/projects, then grade.
    # Ctrl-D (EOFError) at any prompt exits gracefully via exit_message().
    try:
        os.system("clear")
        arg_dict = process_args()
        mode_regrade = True   # offer to re-grade already-graded projects by default
        mode_prompt = False   # interactive score-entry shell off by default
        sections = []
        students = []
        projects = []
        netIDs = []
        # Re-launched by prompt() in a new terminal: jump straight to the shell.
        if "__run_a_prompt_shell__" in arg_dict:
            prompt_shell(arg_dict["__run_a_prompt_shell__"])
            exit()
        #because art
        print(" ".join(sys.argv))
        print(" ")
        print(" o ")
        print(" o o ")
        print(" o ")
        print(" o o o o ")
        print(" ")
        print(" \_O__o o__O_/ ")
        print(" | | ")
        print(" / ) ( \ ")
        print(" +=============================================+ ")
        print(" | CSE 231 Grading Script | ")
        print(" ")
        ####################
        # Configuration #
        ####################
        if "-f" in arg_dict or "--file" in arg_dict:
            try:
                FILES_TO_OPEN += arg_dict["-f"]
            except Exception:
                pass
            try:
                FILES_TO_OPEN += arg_dict["--file"]
            except Exception:
                pass
        if "-k" in arg_dict or "--skip" in arg_dict:
            mode_regrade = False
        if "-r" in arg_dict or "--prompt" in arg_dict:
            mode_prompt = True
            # The prompt shell opens the score sheet itself.
            FILES_TO_OPEN.remove(SCORE_SHEET_FILE_STYLE)
        if "-s" not in arg_dict and "--section" not in arg_dict and "-n" not in arg_dict and "--netid" not in arg_dict:
            user_selection = input("\nWould you like to grade multiple students? (y/n): ")
            if(user_selection.lower() == "n"):
                student_to_grade = input("What is the netID of the student you would like to grade?: ")
                students += validate_student(student_to_grade)
        if "-n" in arg_dict or "--netid" in arg_dict:
            nedIDs_to_add = []
            try:
                nedIDs_to_add += arg_dict["-n"]
            except Exception:
                pass
            try:
                nedIDs_to_add += arg_dict["--netid"]
            except Exception:
                pass
            for netID in nedIDs_to_add:
                students += validate_student(netID)
        if "-s" in arg_dict or "--section" in arg_dict:
            try:
                sections += arg_dict["-s"]
            except Exception:
                pass
            try:
                sections += arg_dict["--section"]
            except Exception:
                pass
        sections = validate_sections(sections,students)
        if "-p" in arg_dict or "--project" in arg_dict:
            try:
                projects += arg_dict["-p"]
            except Exception:
                pass
            try:
                # BUG FIX: process_args stores long-form values under "--project",
                # but this previously read "--projects", so the long flag's
                # project list was silently ignored.
                projects += arg_dict["--project"]
            except Exception:
                pass
        projects = validate_projects(projects,sections,students)
        students = construct_full_student_list(sections,students)
        grade(students,projects,mode_regrade,mode_prompt)
    except EOFError:
        exit_message()
        exit()
|
class Solution(object):
    def wiggleMaxLength(self, nums):
        """
        Return the length of the longest wiggle subsequence of nums.

        A wiggle subsequence alternates strictly between rising and falling
        differences; runs of equal values contribute a single element.

        :type nums: List[int]
        :rtype: int
        """
        n = len(nums)
        if n <= 1:
            return n
        # Skip the flat prefix where consecutive values are equal.
        start = 1
        while start < n and nums[start] == nums[start - 1]:
            start += 1
        if start == n:
            return 1  # all values equal: a single element is the best we can do
        # The first move was downward exactly when nums[0] > nums[start],
        # in which case the next required move is upward.
        expecting_up = nums[0] > nums[start]
        count = 2
        last = nums[start]
        for value in nums[start + 1:]:
            if expecting_up and value > last:
                count += 1
                expecting_up = False
                last = value
            elif (not expecting_up) and value < last:
                count += 1
                expecting_up = True
                last = value
            elif value != last:
                # Same direction as before: extend the current run greedily.
                last = value
        return count
|
from encoder.params_model import model_embedding_size as speaker_embedding_size
from utils.argutils import print_args
from utils.modelutils import check_model_paths
from synthesizer.inference import Synthesizer
from encoder import inference as encoder
from vocoder import inference as vocoder
from pathlib import Path
from os.path import normpath, basename
import numpy as np
import soundfile as sf
import librosa
import argparse
import torch
import sys
from audioread.exceptions import NoBackendError
if __name__ == '__main__':
    ## Info & args
    # Batch voice-cloning driver: embed a reference voice, then synthesize and
    # vocode every line of an input text file into numbered .wav files.
    parser = argparse.ArgumentParser(
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    ##Pretrained path for encoder
    parser.add_argument("-e", "--enc_model_fpath", type=Path,
                        default="encoder/saved_models/pretrained.pt",
                        help="Path to a saved encoder")
    ##Pretrained path for synthesizer
    parser.add_argument("-s", "--syn_model_dir", type=Path,
                        default="synthesizer/saved_models/logs-pretrained/",
                        help="Directory containing the synthesizer model")
    ##Pretrained path for vocoder
    parser.add_argument("-v", "--voc_model_fpath", type=Path,
                        default="vocoder/saved_models/pretrained/pretrained.pt",
                        help="Path to a saved vocoder")
    # NOTE(review): the help strings for -t and -f below are copy/paste
    # leftovers — they actually take the input text file and the reference
    # voice recording respectively.
    parser.add_argument("-t", "--input_text", type=Path,
                        default="inputtxt.txt",
                        help="Path to a saved vocoder")
    parser.add_argument("-f", "--reference_voice", type=Path,
                        default="nickTest.wav",
                        help="Path to a saved vocoder")
    parser.add_argument("--low_mem", action="store_true", help=\
        "If True, the memory used by the synthesizer will be freed after each use. Adds large "
        "overhead but allows to save some GPU memory for lower-end GPUs.")
    parser.add_argument("--no_sound", action="store_true", help=\
        "If True, audio won't be played.")
    parser.add_argument("--seed", type=int, default=None, help=\
        "Optional random number seed value to make toolbox deterministic.")
    parser.add_argument("--no_mp3_support", action="store_true", help=\
        "If True, disallows loading mp3 files to prevent audioread errors when ffmpeg is not installed.")
    args = parser.parse_args()
    print_args(args, parser)
    if not args.no_sound:
        import sounddevice as sd
    #if torch.cuda.is_available():
    #    device_id = torch.cuda.current_device()
    #    gpu_properties = torch.cuda.get_device_properties(device_id)
    #    ## Print some environment information (for debugging purposes)
    #    print("Found %d GPUs available. Using GPU %d (%s) of compute capability %d.%d with "
    #        "%.1fGb total memory.\n" %
    #        (torch.cuda.device_count(),
    #        device_id,
    #        gpu_properties.name,
    #        gpu_properties.major,
    #        gpu_properties.minor,
    #        gpu_properties.total_memory / 1e9))
    #else:
    #    print("Using CPU for inference.\n")
    ## Remind the user to download pretrained models if needed
    check_model_paths(encoder_path=args.enc_model_fpath, synthesizer_path=args.syn_model_dir,
                      vocoder_path=args.voc_model_fpath)
    ## Load the models one by one.
    print("Preparing the encoder, the synthesizer and the vocoder...")
    encoder.load_model(args.enc_model_fpath)
    synthesizer = Synthesizer(args.syn_model_dir.joinpath("taco_pretrained"), low_mem=args.low_mem, seed=args.seed)
    vocoder.load_model(args.voc_model_fpath)
    #### Interactive speech generation ###
    print("Synthesizing speech on the command line!\n")
    # Get the reference audio filepath
    #message = "Reference voice: enter an audio filepath of a voice to be cloned (mp3, " \
    #          "wav, m4a, flac, ...):\n"
    #in_fpath = Path(input(message).replace("\"", "").replace("\'", ""))
    in_fpath = args.reference_voice
    ## Computing the embedding
    # First, we load the wav using the function that the speaker encoder provides. This is
    # important: there is preprocessing that must be applied.
    # The following two methods are equivalent:
    # - Directly load from the filepath:
    preprocessed_wav = encoder.preprocess_wav(in_fpath)
    # - If the wav is already loaded:
    original_wav, sampling_rate = librosa.load(str(in_fpath))
    preprocessed_wav = encoder.preprocess_wav(original_wav, sampling_rate)
    print("Loaded file succesfully")
    # Then we derive the embedding. There are many functions and parameters that the
    # speaker encoder interfaces. These are mostly for in-depth research. You will typically
    # only use this function (with its default parameters):
    embed = encoder.embed_utterance(preprocessed_wav)
    print("Created the embedding")
    print("Interactive generation loop")
    num_generated = 0
    ## Read the text file containing sentences
    # NOTE(review): textFile is never closed; consider "with open(...) as textFile:".
    textFile = open(args.input_text, "r")
    ## Loop through each of the lines in the text file.
    for textLine in textFile:
        # Each line is synthesized independently; a failure on one line is
        # caught below so the batch keeps going.
        try:
            ## Generating the spectrogram
            #text = input("Write a sentence (+-20 words) to be synthesized:\n"
            text = textLine
            # The synthesizer works in batch, so you need to put your data in a list or numpy array
            texts = [text]
            embeds = [embed]
            # If you know what the attention layer alignments are, you can retrieve them here by
            # passing return_alignments=True
            specs = synthesizer.synthesize_spectrograms(texts, embeds)
            spec = specs[0]
            print("Created the mel spectrogram")
            ## Generating the waveform
            print("Synthesizing the waveform:")
            # If seed is specified, reset torch seed and reload vocoder
            if args.seed is not None:
                torch.manual_seed(args.seed)
                vocoder.load_model(args.voc_model_fpath)
            # Synthesizing the waveform is fairly straightforward. Remember that the longer the
            # spectrogram, the more time-efficient the vocoder.
            generated_wav = vocoder.infer_waveform(spec)
            ## Post-generation
            # There's a bug with sounddevice that makes the audio cut one second earlier, so we
            # pad it.
            generated_wav = np.pad(generated_wav, (0, synthesizer.sample_rate), mode="constant")
            # Trim excess silences to compensate for gaps in spectrograms (issue #53)
            generated_wav = encoder.preprocess_wav(generated_wav)
            # Play the audio (non-blocking)
            #if not args.no_sound:
            #    try:
            #        sd.stop()
            #        sd.play(generated_wav, synthesizer.sample_rate)
            #    except sd.PortAudioError as e:
            #        print("\nCaught exception: %s" % repr(e))
            #        print("Continuing without audio playback. Suppress this message with the \"--no_sound\" flag.\n")
            #    except:
            #        raise
            # Save it on the disk
            filename = str(basename(normpath(args.syn_model_dir))) + "-%02d.wav" % num_generated
            print(generated_wav.dtype)
            sf.write(filename, generated_wav.astype(np.float32), synthesizer.sample_rate)
            num_generated += 1
            print("\nSaved output as %s\n\n" % filename)
        except Exception as e:
            # Broad catch keeps the batch running after a bad line.
            print("Caught exception: %s" % repr(e))
            print("Restarting\n")
|
import reversion
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.postgres.fields import JSONField
from django.core import validators
from django.core.exceptions import ValidationError
from django.utils.functional import cached_property
from base.fields import AutoUUIDField
from base.fields import ValidatedJSONField
from base.serializers import freeze
from reportforms.schema import (
FORM_SCHEMA,
get_default_form,
get_schema_field_count,
validate_answer_for_schema,
)
@reversion.register()
class ReportForm(models.Model):
    """A named report form; the concrete JSON schemas live in ReportFormVersion."""
    id = AutoUUIDField()
    codename = models.CharField(max_length=250, unique=True)  # stable machine-readable identifier
    name = models.CharField(max_length=250)
    description = models.TextField()

    def __str__(self):
        return f'{self.codename} - {self.name}'
@reversion.register()
class ReportFormVersion(models.Model):
    """A numbered, schema-bearing revision of a ReportForm.

    save() refuses schema changes once the version has any instances, so
    existing answers can never be invalidated by a later edit.
    """
    id = AutoUUIDField()
    code = models.IntegerField()  # version number, unique per form (see Meta)
    title = models.CharField(max_length=200)
    form = models.ForeignKey(
        'ReportForm',
        on_delete=models.CASCADE,
        related_name='versions',
    )
    form_schema = ValidatedJSONField(schema=FORM_SCHEMA, default=get_default_form)
    field_count = models.SmallIntegerField(default=0, editable=False)  # denormalized; kept in sync by save()

    def save(self, *args, **kwargs):
        # Cache the number of schema fields so consumers don't re-parse the JSON.
        if bool(self.form_schema):
            self.field_count = get_schema_field_count(self.form_schema)
        # don't allow schema changes if the version has any instances
        previous_state = type(self).objects.filter(pk=self.id).first()
        if previous_state is not None and \
           self.instances.count() > 0 and \
           freeze(previous_state.form_schema) != freeze(self.form_schema):
            raise ValidationError("the version has at least one instance")
        return super().save(*args, **kwargs)

    def __str__(self):
        return f'{self.form.codename} - v{self.code}'

    class Meta:
        unique_together = (('code', 'form',),)
@reversion.register()
class ReportFormInstance(models.Model):
    """One delivery of a form version to a target for a given trimester/year.

    Holds the answer payload and a string-valued state (STATE_TYPES).
    """

    class State:
        # String constants for the instance lifecycle states.
        OPEN = 'open'
        CLOSED = 'closed'
        NEW_SENDING = 'new_sending'
        NEW_SENT = 'new_sent'
        ANSWER_TO_VALIDATE = 'answer_to_validate'
        ANSWER_VALIDATED = 'answer_validated'
        ANSWER_SENDING = 'answer_sending'
        ANSWER_RECEIVED = 'answer_received'
        ANSWER_SENT = 'answer_sent'
        ANSWER_REVIEWED = 'answer_reviewed'

    STATE_TYPES = (
        (State.OPEN, 'Open'),
        (State.CLOSED, 'Closed'),
        (State.NEW_SENDING, 'Sending'),
        (State.NEW_SENT, 'Sent'),
        (State.ANSWER_TO_VALIDATE, 'To Validate'),
        (State.ANSWER_VALIDATED, 'Answer Validated'),
        (State.ANSWER_SENDING, 'Answer Sending'),
        (State.ANSWER_SENT, 'Answer Sent'),
        (State.ANSWER_RECEIVED, 'Answer Received'),
        (State.ANSWER_REVIEWED, 'Answer Reviewed'),
    )
    # State groupings — presumably used for "finished"/"already sent" filters
    # elsewhere in the codebase; confirm against callers.
    CLOSED_STATES = [
        State.ANSWER_REVIEWED,
        State.CLOSED
    ]
    SENT_STATES = [
        State.ANSWER_SENT,
        State.ANSWER_RECEIVED,
        State.ANSWER_REVIEWED,
    ]
    id = AutoUUIDField()
    version = models.ForeignKey(
        'ReportFormVersion',
        on_delete=models.PROTECT,
        related_name='instances',
    )
    # Reporting period: trimester 1-4 of `year`.
    trimester = models.PositiveSmallIntegerField(validators=[validators.MinValueValidator(1),
                                                             validators.MaxValueValidator(4)])
    year = models.PositiveSmallIntegerField()
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True)
    answer = JSONField(default=dict, blank=True)
    # Latched to True by save() the first time a non-empty answer is stored.
    answer_started = models.BooleanField(default=False, editable=False)
    sent_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='+',
        blank=True,
        null=True,
    )
    sent_at = models.DateTimeField(blank=True, null=True)
    received_at = models.DateTimeField(blank=True, null=True)
    state = models.CharField(
        max_length=200,
        choices=STATE_TYPES,
        default=State.NEW_SENDING,
    )
    reason = models.TextField(default='')
    target = models.ForeignKey(
        'targets.Target',
        on_delete=models.PROTECT,
        related_name='form_instances',
    )
    documents = models.ManyToManyField(
        'documents.Document',
        related_name='+',
        blank=True,
    )

    @cached_property
    def answer_count(self):
        """Number of top-level keys currently present in the answer payload."""
        count = 0
        if bool(self.answer):
            count = len(self.answer)
        return count

    def validate_answer(self, answer):
        """Validate *answer* against this instance's version schema."""
        return validate_answer_for_schema(answer, self.version.form_schema)

    def save(self, *args, **kwargs):
        # Latch answer_started once the answer first becomes non-empty.
        if not self.answer_started:
            self.answer_started = bool(self.answer)
        return super().save(*args, **kwargs)

    def __str__(self):
        return f'{str(self.version)} (Target: {str(self.target)}, trimester: {self.trimester}, year: {self.year})'

    class Meta:
        ordering = ['-created_at']
class ReportFormComment(models.Model):
    """A free-text comment attached to a ReportFormInstance."""
    id = AutoUUIDField()
    form_instance = models.ForeignKey(
        'ReportFormInstance',
        on_delete=models.CASCADE,
        related_name='comments',
        editable=False,
    )
    content = models.TextField()
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='+',
        null=True,
    )
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
class FormInstanceRequest(models.Model):
    """A request to supersede an existing ReportFormInstance with a new one.

    NOTE(review): presumably raised when a target asks to redo a submission;
    confirm against the workflow views that create/accept these.
    """

    class State:
        CREATED = 'created'
        ACCEPTED = 'accepted'
        REJECTED = 'rejected'
        STATE_TYPES = (
            (CREATED, 'Request created'),
            (ACCEPTED, 'Request accepted'),
            (REJECTED, 'Request rejected'),
        )
    id = AutoUUIDField()
    # Replacement instance (nullable until one exists).
    new_instance = models.ForeignKey(
        'ReportFormInstance',
        on_delete=models.PROTECT,
        related_name='+',
        null=True,
        blank=True
    )
    # The instance this request was raised against.
    old_instance = models.ForeignKey(
        'ReportFormInstance',
        on_delete=models.PROTECT,
        related_name='form_requests',
    )
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='+',
        null=True,
        blank=True
    )
    received_at = models.DateTimeField(blank=True, null=True)
    state = models.CharField(
        max_length=200,
        choices=State.STATE_TYPES,
        default=State.CREATED,
    )
    comment = models.TextField(blank=True, null=True)

    def __str__(self):
        return f'{str(self.old_instance)} (Created at: {str(self.created_at)})'

    class Meta:
        ordering = ['-created_at']
class FormCase(models.Model):
    """An issue ("case") opened against a form instance, optionally pointing
    at another instance it was reassigned to."""

    class State:
        OPEN = 'open'
        CLOSED = 'closed'
    STATE_TYPES = (
        (State.OPEN, 'Open'),
        (State.CLOSED, 'Closed'),
    )
    id = AutoUUIDField()
    form_instance = models.ForeignKey(
        'ReportFormInstance',
        on_delete=models.CASCADE,
        related_name='cases',
        editable=False,
    )
    # NOTE(review): related_name='instance' on a ForeignKey reads oddly for a
    # reverse accessor (it yields a manager, not a single instance) — confirm.
    reassign_to = models.ForeignKey(
        'ReportFormInstance',
        on_delete=models.CASCADE,
        related_name='instance',
        editable=False,
        null=True,
    )
    title = models.CharField(max_length=200)
    description = models.TextField()
    state = models.CharField(
        max_length=20,
        choices=STATE_TYPES,
        default=State.OPEN,
    )
    documents = models.ManyToManyField(
        'documents.Document',
        related_name='+',
        blank=True,
    )
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='+',
        null=True,
    )
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True)
    closed_at = models.DateTimeField(blank=True, null=True)

    class Meta:
        ordering = ['-created_at']
class FormCaseComment(models.Model):
    """A free-text comment attached to a FormCase."""
    case = models.ForeignKey(
        'FormCase',
        on_delete=models.CASCADE,
        related_name='comments',
        editable=False,
    )
    content = models.TextField()
    created_by = models.ForeignKey(
        settings.AUTH_USER_MODEL,
        on_delete=models.SET_NULL,
        related_name='+',
        null=True,
    )
    created_at = models.DateTimeField(auto_now_add=True, db_index=True)
    updated_at = models.DateTimeField(auto_now=True)
|
# Generated by Django 3.1.2 on 2020-10-08 12:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``token`` CharField to the ``booking`` model."""

    dependencies = [
        ('webgui', '0002_auto_20201008_1150'),
    ]
    operations = [
        migrations.AddField(
            model_name='booking',
            name='token',
            field=models.CharField(blank=True, max_length=16, verbose_name='Token'),
        ),
    ]
|
# cook your dish here
# Read three integers x, y, z from one line and print (x - z) // (y + z).
x, y, z = (int(token) for token in input().split())
print((x - z) // (y + z))
#Steven Brown
#Assignment_3
# 25 FEB 2016
from __future__ import print_function
import unittest
import sys
trace = False  # debug flag; not referenced anywhere in this module's visible code
'''A dictionary class implemented by hashing and chaining. The mapping
is represented internally as a list of lists. The outer list is
initialized to all None's. When multiple keys hash to the same location
(collision), new entries are appended to the list at that location. The
table is rehashed when it exceeds 75% of its capacity. The size
starts at 10 and doubles with each rehash. The first value of a sublist is the
key, the second the value.
The initial list is:
[None, None, None, None, None, None, None, None, None, None]
After 0, "zero" 1, "one" 10, "ten" have been inserted, it will be:
[[[0, "zero"],[10, "ten"]],[[1, "one"]], None, None,...]'''
class my_hash_set:
    '''A hash map (key -> value) using separate chaining.

    Buckets live in self.__items, a list of self.__limit slots.  An unused
    slot holds None; a used slot holds a list of [key, value] pairs whose
    keys hashed there.  The table starts with 10 slots and doubles
    (rehashing every entry) whenever the load factor exceeds 0.75.
    '''

    def __init__(self, init=None):
        '''Create an empty map, optionally loading (key, value) pairs from init.'''
        self.__limit = 10
        self.__items = [None] * self.__limit
        self.__count = 0
        if init:
            for i in init:
                self.__setitem__(i[0], i[1])

    def __len__(self):
        return self.__count

    def __flattened(self):
        '''Return every stored [key, value] pair as one flat list.'''
        # BUG FIX: previously compared buckets against self.__none, an
        # attribute that does not exist; empty slots are marked with None.
        occupied = filter(lambda bucket: bucket is not None, self.__items)
        return [pair for bucket in occupied for pair in bucket]

    def __iter__(self):
        return iter(self.__flattened())

    def __str__(self):
        return str(self.__flattened())

    def __setitem__(self, key, value):
        '''Map key to value, replacing any existing entry for key.'''
        h = hash(key) % self.__limit
        # BUG FIX: the original tested "value[h]" (indexing the new value!)
        # and stored a bare [key, value] instead of a one-element chain, so a
        # collision corrupted the bucket; duplicate keys were also counted
        # twice instead of replacing the old value.
        if self.__items[h] is None:
            self.__items[h] = []
        for pair in self.__items[h]:
            if pair[0] == key:
                pair[1] = value  # overwrite in place; count is unchanged
                return
        self.__items[h].append([key, value])
        self.__count += 1
        if (0.0 + self.__count) / self.__limit > .75:
            self.__rehash()

    def __rehash(self):
        '''Double the table size and re-insert every entry.'''
        pairs = self.__flattened()
        self.__limit *= 2
        self.__items = [None] * self.__limit
        self.__count = 0
        for key, value in pairs:
            self.__setitem__(key, value)

    def __contains__(self, key):
        '''Return True when key has an entry in the map.'''
        bucket = self.__items[hash(key) % self.__limit]
        if bucket:
            for pair in bucket:
                if pair[0] == key:
                    return True
        return False

    def __getitem__(self, key):
        '''Return the value stored for key; raise KeyError when absent.'''
        bucket = self.__items[hash(key) % self.__limit]
        if bucket:
            for pair in bucket:
                if pair[0] == key:
                    return pair[1]
        raise KeyError(key)

    def __delitem__(self, key):
        '''Remove key's entry; raise KeyError when absent.'''
        h = hash(key) % self.__limit
        bucket = self.__items[h]
        if bucket:
            for index, pair in enumerate(bucket):
                if pair[0] == key:
                    del bucket[index]
                    if not bucket:
                        self.__items[h] = None  # restore the empty-slot marker
                    self.__count -= 1
                    return
        raise KeyError(key)
class test_my_hash_set(unittest.TestCase):
    """Unit tests for my_hash_set."""
    def test_empty(self):
        self.assertEqual(len(my_hash_set()), 0)
    def test_add_one(self):
        # Bug fix: the original inserted two items here and then asserted a
        # length of 1; an "add one" test should insert a single item.
        s = my_hash_set()
        s[1] = "one"
        self.assertEqual(len(s), 1)
        self.assertEqual(s[1], "one")
    def test_add_two(self):
        s = my_hash_set()
        s[1] = "one"
        s[2] = "two"
        self.assertEqual(len(s), 2)
    def test_add_twice(self):
        # Bug fix: the original called the misspelled self.assetEquals and
        # the deprecated assertEquals alias.
        s = my_hash_set()
        s[1] = "one"
        s[1] = "one"
        self.assertEqual(len(s), 1)
if __name__ == '__main__':
    # Run the unit tests when executed as a script.
    unittest.main()
from x1scr.apps.news.models import News
from django import template
register = template.Library()
@register.inclusion_tag('news.html')
def pull_latest_news(howmany=3):
    """Render the 'news.html' template with the latest published news.

    :param howmany: maximum number of items to include, newest first.
    """
    published = News.objects.filter(published=True)
    latest = published.order_by('-date_published')[:howmany]
    return {'news': latest}
|
# Copyright (C) 2019-2025 by
# ATTA Amanvon Ferdinand <amanvon238@gmail.com>
# All rights reserved.
# BSD license.
#
# Author: ATTA Amanvon Ferdinand (amanvon238@gmail.com)
"""
Ce module recoit les paramètres utiles aux cycles de simulation .
Ces paramètres lui servent à créer la topologie à utiliser dans chaque cycle de simulation. Ce nom sera utiliser par la classe Topology
du package topologies pour créer le graphe du réseau.
Un cycle de simulation contient 5000 processus.
Un processus se presente comme suit :
* Choisir les noeuds considérés comme ayant la capacité de conversion de longueur d'onde
* Génération des routages initial et final
* Migration du routage initial et final
Après un cycle de simulation, les mesures de performance (nombre d'interruptions de flux,
nombre de longueurs d'onde additionnelles utilisées et durée d'interruption) sont évaluées
en termes de moyenne et de variance.
L'exécution de ce module effectue 3 cycles de simulation.
Chaque cycle de simulation concerne une plage de nombres de noeuds ayant la capacité de conversion à considérer.
3 plages étant considérées au total.
"""
from topologies.topo import Topology
from topologies.wc_placement import Placement
import os
import sys
import logging
import daiquiri
import numpy as np
import pandas as pd
class ReconfRoute:
    """
    Collects the network topology plus the initial and final routings, then
    drives the reconfiguration (simulation) process.

    Parameters
    ----------
    param: dict
        Simulation parameters held in a dictionary: the network topology
        (param['net_topo']), the network type (param['net_type']), the number
        of processes per simulation (param['size']), the routing type to use
        (param['paire_route_type']) and the algorithm to run during the
        simulations (param['algo']).

    Examples
    -------
    Suppose the command line supplies the nsfnet topology, a network with
    partial wavelength conversion, 5000 processes per simulation, the
    mono-optical tree routing type and the swcnTreeReconf algorithm:

    >>> param = {'net_topo': 'nsfnet', 'net_type': 0.5, 'size':5000, 'paire_route_type': 1, 'algo': 'swcnTreeReconf'}
    >>> reconfRoute = ReconfRoute(param)
    """
    def __init__(self, param=dict()):
        """
        Class constructor.

        NOTE(review): the mutable default `param=dict()` is shared across
        calls; callers should always pass their own dictionary.
        """
        self.param = param
        # Dynamically load the routing generator matching the requested pair type.
        routing_class = load_class(package_name="routingGenerator", algo=route_pair(self.param['paire_route_type']))
        self.route_generator = routing_class()
        # Dynamically load the migration algorithm for the network type.
        migration_class = load_class(package_name="routingMigration",
                                     sub_package_name=migrate_net(self.param['net_type']),
                                     algo=self.param['algo'])
        self.migrate_route = migration_class()
        #self.criteria = {'add_cost':}
    def simulate(self):
        """
        Run one simulation cycle according to the simulation parameters and
        collect the results for later analysis.

        Returns
        -------
        None
        """
        # Log to a file next to this module and to stdout.
        os.chdir(os.path.dirname(os.path.abspath(__file__)))
        directory = os.getcwd()
        daiquiri.setup(level=logging.INFO, outputs=(
            daiquiri.output.File(directory=directory, filename="mylauncher.log"), daiquiri.output.Stream(sys.stdout),
        ))
        logger = daiquiri.getLogger(__name__)
        # 1. Determine the topology
        # 1.1 Create the topology
        topo = Topology(self.param['net_topo'])
        graph = topo.create_topo()
        logger.info("Topology was created")
        #exit(-1)
        d = dict()
        if self.param['net_type'] == 0.5:
            d = {0: {'min': 50, 'max': 75}, 1: {'min': 25, 'max': 50},
                 2: {'min': 1, 'max': 25}}  # chosen ranges of swcn percentages
        elif self.param['net_type'] == 0:
            d = {0: {'min': 0, 'max': 0}}
        else:
            d = {0: {'min': 100, 'max': 100}}
        logger.info("Cycle of 3 Simulations starting")
        # One column per percentage range; rows hold AVG/SD/Min/Max statistics.
        df_add_cost = pd.DataFrame()
        df_duration = pd.DataFrame()
        df_interrupt_duration = pd.DataFrame()
        if len(d) != 3:
            df_add_cost = pd.DataFrame({str(d[0]['min'])+'-'+str(d[0]['max']):[0.0, 0.0, 0.0,0.0]}, index= ['AVG', 'SD', 'Min', 'Max'])
            df_duration = pd.DataFrame({str(d[0]['min']) +'-'+str(d[0]['max']): [0.0, 0.0, 0.0, 0.0]}, index=['AVG', 'SD', 'Min', 'Max'])
            df_interrupt_duration = pd.DataFrame({str(d[0]['min']) +'-'+ str(d[0]['max']): [0.0, 0.0, 0.0, 0.0]}, index=['AVG', 'SD', 'Min', 'Max'])
        else:
            df_add_cost = pd.DataFrame({str(d[0]['min']) +'-'+ str(d[0]['max']): [0.0, 0.0, 0.0, 0.0], str(d[1]['min']) +'-'+ str(d[1]['max']): [0.0, 0.0, 0.0, 0.0], str(d[2]['min']) +'-'+ str(d[2]['max']): [0.0, 0.0, 0.0, 0.0]}, index=['AVG', 'SD', 'Min', 'Max'])
            df_duration = pd.DataFrame({str(d[0]['min']) +'-'+ str(d[0]['max']): [0.0, 0.0, 0.0, 0.0], str(d[1]['min']) +'-'+ str(d[1]['max']): [0.0, 0.0, 0.0, 0.0], str(d[2]['min']) +'-'+ str(d[2]['max']): [0.0, 0.0, 0.0, 0.0]}, index=['AVG', 'SD', 'Min', 'Max'])
            df_interrupt_duration = pd.DataFrame({str(d[0]['min']) +'-'+ str(d[0]['max']): [0.0, 0.0, 0.0, 0.0], str(d[1]['min']) +'-'+ str(d[1]['max']): [0.0, 0.0, 0.0, 0.0], str(d[2]['min']) +'-'+ str(d[2]['max']): [0.0, 0.0, 0.0, 0.0]}, index=['AVG', 'SD', 'Min', 'Max'])
        indice = 0
        for key, item in d.items():
            # Run one simulation: a simulation contains N processes.
            N = self.param['size']
            min_value = item['min']
            max_value = item['max']
            logger.info("Simulation " + str(key))
            add_cost_array = np.zeros(N, dtype =int)
            duration_array = np.zeros(N, dtype =int)
            interrupt_duration_array = np.zeros(N, dtype=int)
            for i in range(0, N):
                # For each process do
                print("TOUR :", i+1)
                # Process definition
                # 1.2 Choose the nodes with wavelength-conversion capability
                logger.info("Wavelength converter placement ")
                place_wcn = Placement(min_value, max_value)
                net = place_wcn.assign(graph)
                # 1.3 Flow initialisation
                # A link carrying flow gets attribute flow=1, otherwise 0.
                # Initially flow is 0 on every edge of the graph, i.e. no
                # flow circulates on the network links.
                for e in net.edges():
                    net[e[0]][e[1]]['edge_data'] = {'flow': 0}
                # Links (fibres) are assumed to carry 16 distinct wavelengths
                #wavelengths_list = list(range(0, 16))
                # 2. Generate the initial and final routings
                logger.info("Routing generation process is starting ")
                self.route_generator.set_net(net)
                initial_route, final_route = self.route_generator.generate()
                logger.info("Routing generation process completed successfully ")
                # Flow generation: simulate the flow on the initial tree here
                # 3. Routing migration
                self.migrate_route.set_pair(initial_route, final_route)
                logger.info("Routing switching process is starting ")
                criteria_dict = self.migrate_route.run()
                add_cost_array[i] = criteria_dict['add_cost']
                duration_array[i] = criteria_dict['duration']
                interrupt_duration_array[i] = criteria_dict['interrupt_duration']
                #break
            #break  # remove once development is finished
            #print(add_cost_array)
            #print(duration_array)
            #print(np.mean(add_cost_array))
            #print(np.mean(duration_array))
            #exit(0)
            # Update dataFrames
            df_add_cost.at['Min', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = np.min(add_cost_array)
            df_add_cost.at['Max', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = np.max(add_cost_array)
            df_add_cost.at['AVG', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = round(np.mean(add_cost_array), 2)
            df_add_cost.at['SD', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = round(np.std(add_cost_array), 2)
            df_duration.at['Min', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = np.min(duration_array)
            df_duration.at['Max', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = np.max(duration_array)
            df_duration.at['AVG', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = round(np.mean(duration_array), 2)
            df_duration.at['SD', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = round(np.std(duration_array), 2)
            df_interrupt_duration.at['Min', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = np.min(interrupt_duration_array)
            df_interrupt_duration.at['Max', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = np.max(interrupt_duration_array)
            df_interrupt_duration.at['AVG', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = round(np.mean(interrupt_duration_array), 2)
            df_interrupt_duration.at['SD', str(d[indice]['min']) + '-' + str(d[indice]['max'])] = round(np.std(interrupt_duration_array), 2)
            indice = indice +1
        #exit(0)
        # save excel file
        with pd.ExcelWriter('results'+os.sep+self.param['net_topo']+'.xlsx') as writer:
            df_add_cost.to_excel(writer, sheet_name='add_cost')
            df_duration.to_excel(writer, sheet_name='duration')
            df_interrupt_duration.to_excel(writer, sheet_name='interrupt_duration')
        print("-------------------------------------------------------")
        print("cout additionnelle")
        print(df_add_cost.head(4))
        print("-------------------------------------------------------")
        print("temps de reconfiguration")
        print(df_duration.head(4))
        print("-------------------------------------------------------")
        print("interruption")
        print(df_interrupt_duration)
        print("Une copie de ces resultats sont dans le classeur {}".format('results'+os.sep+self.param['net_topo']+'.xlsx'))
def migrate_net(argument=0.5):
    """
    Return the network-type label for a numeric network-type code.

    Parameters
    ----------
    argument: int or float
        value encoding the network type (0, 0.5 or 1)

    Returns
    -------
    str
        "nwcn" (no conversion), "swcn" (partial conversion) or
        "fwcn" (full conversion); raises KeyError for unknown codes
    """
    labels = {0: "nwcn", 0.5: "swcn", 1: "fwcn"}
    return labels[argument]
def route_pair(argument=1):
    """
    Return the routing type used for the pair.

    Parameters
    ----------
    argument: int
        value encoding the routing type (1, 2 or 3)

    Returns
    -------
    str
        "pairOfLightTree", "pairOfSemiLightTree" or "pairOfLightForest";
        raises KeyError for unknown codes
    """
    if argument == 1:
        return "pairOfLightTree"
    if argument == 2:
        return "pairOfSemiLightTree"
    if argument == 3:
        return "pairOfLightForest"
    raise KeyError(argument)
def load_class(package_name, sub_package_name="",algo=""):
    """
    Return the class to instantiate for the requested algorithm.

    The module named ``algo`` is imported from the directory
    ``<this file's dir>/<package_name>/<sub_package_name>`` and the class
    whose name is ``algo`` with its first letter capitalised is returned.

    Parameters
    ----------
    package_name: str
        name of the parent package of the sub-package holding the algorithm
    sub_package_name: str
        name of the sub-package containing the algorithm module
    algo: str
        name of the algorithm module to import

    Returns
    -------
    type
        The class to instantiate; prints a message and exits the process
        when the module or the class cannot be found.
    """
    # Make the algorithm directory importable, import, then restore sys.path.
    os.chdir(os.path.dirname(os.path.abspath(__file__)))
    algo_dir = os.getcwd()+os.sep+package_name+os.sep+sub_package_name
    paths = list(sys.path)
    sys.path.insert(0, algo_dir)
    module = None
    try:
        module = __import__(algo)
    except ImportError:
        # Bug fix: the original bare `except:` swallowed every exception
        # (including KeyboardInterrupt); only a failed import means the
        # module is missing.
        print("Module inexistant!")
    finally:
        sys.path[:] = paths
    try:
        # Naming convention: class = module name with a capitalised first letter.
        return getattr(module, algo[0].upper() + algo[1:])
    except (AttributeError, IndexError, TypeError):
        # AttributeError: class missing; TypeError: import failed above
        # (module is None); IndexError: empty algo name.
        print('classe non trouvée')
        sys.exit(-1)
|
import asyncio
import logging
import struct
import sys
from aiohttp import web
from .run_exec import run_exec
# Binary response framing: a 5-byte magic id followed by the header
# '>5s2B2I' = (magic, exc_status, exit_status, stdout_len, stderr_len),
# big-endian, then the raw stdout and stderr payloads.
FMT_ID = b'\x01T91\x1d'
HEADER_STRUCT = '>5s2B2I'
# Captured stdout/stderr are each capped at 128 KiB.
MAX_LEN = 128 * 1024
# Module logger: everything at DEBUG and above goes to stderr.
logger = logging.getLogger('t9_exec_server')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler()
handler.setLevel(logging.DEBUG)
logger.addHandler(handler)
logger.info('Exec server starting up')
async def exec_endpoint(request):
    """Handle POST /exec: run the requested command and reply with a packed
    binary body (header, then raw stdout, then raw stderr)."""
    payload = await request.json()
    cmd = payload['Cmd']
    env = payload.get('Env', {})
    user = payload.get('User')
    cwd = payload.get('WorkingDir')
    timeout = payload.get('Timeout', 10)
    logger.info(f'Received exec request cmd={cmd}')
    # Defaults describe a command that never produced output.
    status = 255
    stdout = b''
    stderr = b''
    out_len = 0
    err_len = 0
    try:
        status, stdout, stderr = await run_exec(cmd, env, user, cwd, timeout)
        exc_status = 0
        # Truncate both streams to the protocol maximum.
        stdout = stdout[:MAX_LEN]
        stderr = stderr[:MAX_LEN]
        out_len = len(stdout)
        err_len = len(stderr)
        logger.info(f'Exec finished status={status} out_len={out_len} err_len={err_len}')
    except asyncio.TimeoutError:
        exc_status = 1
        logger.info('Exec timed out')
    except Exception as e:
        exc_status = 255
        stderr = str(e).encode()
        err_len = len(stderr)
        logger.error(f'Unhandled exception during exec: {str(e)}')
    # build binary response body
    body_fmt = f'{HEADER_STRUCT}{out_len}s{err_len}s'
    packed = struct.pack(body_fmt, FMT_ID, exc_status, status, out_len, err_len, stdout, stderr)
    return web.Response(body=packed)
async def exit_endpoint(request):
    """Handle POST /exit: log and terminate the server process.

    NOTE(review): sys.exit raises SystemExit from inside the handler;
    confirm aiohttp propagates it and actually stops the loop.
    """
    logger.info('Got exit request')
    sys.exit(0)
async def status_endpoint(request):
    """Handle GET /status: liveness probe, always replies 'ok'."""
    logger.info('Got status request -- status ok')
    return web.Response(text='ok')
# Route table: POST /exec runs a command, POST /exit stops the server,
# GET /status is a liveness probe.
app = web.Application()
app.add_routes([
    web.post('/exec', exec_endpoint),
    web.post('/exit', exit_endpoint),
    web.get('/status', status_endpoint),
])
# Blocks until the server is shut down.
web.run_app(app)
|
import time
print("Welcome User \nCreated by:- Simarjot Singh")
def myAlarm():
    """Prompt for a duration in "hh mm ss" format, sleep for that long, then
    ring the terminal bell ten times (one second apart).

    Re-prompts in a loop on malformed input.  Bug fix: the original retried
    by calling itself recursively (on bad input AND from the exception
    handler), which could eventually hit the recursion limit.
    """
    while True:
        try:
            myTime = list(map(int, input("Enter in hh mm ss format: ").split()))
            if len(myTime) == 3 and all(t >= 0 for t in myTime):
                total = myTime[0]*3600 + myTime[1]*60 + myTime[2]
                time.sleep(total)
                for i in range(10):
                    print('\a')  # ASCII BEL rings the terminal bell
                    time.sleep(1)
                return
            print("Invalid Format!")
        except ValueError as e:
            # Non-integer tokens in the input line.
            print("Exception: ", e)
myAlarm()
|
import boto3
import re
from datetime import datetime,timedelta
from botocore.exceptions import ClientError
import boto3.session
def check_volume(volume_id):
    """Return True if the EBS volume still exists, False if it does not or
    the API call fails; '' (falsy) when no volume id is supplied.

    Relies on the module-level `client` (per-region EC2 client).
    """
    if not volume_id: return ''
    try:
        volume=client.describe_volumes(VolumeIds=[volume_id])
        if not volume['Volumes'][0]['VolumeId']:
            return False
        return True
    except ClientError:
        return False
def check_ami(image_id):
    """Classify the AMI as 'Prod'/'NonProd' from its name, or return False
    when the image no longer exists or the API call fails; '' (falsy) when
    no image id is supplied.

    Relies on the module-level `client` (per-region EC2 client).
    """
    if not image_id: return ''
    try:
        image= client.describe_images(ImageIds=[image_id])
        if not image['Images']:
            return False
        else:
            # Case-sensitive substring match on the AMI name.
            if 'prod' in image['Images'][0]['Name']:
                return 'Prod'
            else:
                return 'NonProd'
    except ClientError:
        return False
def get_snapshot_age(snapshot_id):
    """Return the snapshot age in whole days, or False if the lookup fails.

    Relies on the module-level `client` (per-region EC2 client).
    """
    try:
        response = client.describe_snapshots(SnapshotIds=[snapshot_id])
        start_time=response['Snapshots'][0]['StartTime']
        #current_date = datetime.datetime.strptime(str(datetime.date.today()), date_format)
        #snapshot_creation_date = datetime.datetime.strptime(start_time, date_format)
        current_date = datetime.now()
        # StartTime is timezone-aware; strip tzinfo to subtract from the
        # naive local "now".
        snapshot_age = (current_date-(start_time.replace(tzinfo=None))).days
        return snapshot_age
    except ClientError:
        return False
def get_ami(snapshot_description):
    """Extract the AMI id from a 'Created by CreateImage(...) for <ami> ...'
    snapshot description; return False when the pattern does not match."""
    regex = r"^Created by CreateImage\((.*?)\) for (.*?) "
    matches = re.findall(regex, snapshot_description, re.MULTILINE)
    if not matches:
        return False
    # findall yields (instance_id, ami_id) tuples; keep the AMI id.
    return matches[0][1]
def get_snapshot_type(snapshot_name,snapshot_description,snapshot_billing_environment):
    """Classify a snapshot as 'Prod' or 'NonProd'.

    Priority: grafana-named snapshots are always Prod; otherwise the
    BILLING_ENVIRONMENT tag wins, then the Name tag, then the description.
    NOTE(review): only the billing-environment check is case-insensitive;
    name/description matching is case-sensitive, as in the original.
    """
    if 'grafana' in snapshot_name.lower().strip():
        return 'Prod'
    if snapshot_billing_environment:
        is_prod = re.match('^prod', snapshot_billing_environment.lower().strip())
        return 'Prod' if is_prod else 'NonProd'
    if snapshot_name:
        return 'Prod' if 'prod' in snapshot_name else 'NonProd'
    if snapshot_description:
        return 'Prod' if 'prod' in snapshot_description else 'NonProd'
    return 'NonProd'
def get_retention_period(ami_exists,volume_exists,snapshot_type):
    """Return the retention period in days.

    Orphaned snapshots (neither the source AMI nor the volume still exist)
    are kept 180 days if Prod, 90 otherwise; snapshots whose source still
    exists are kept 180 days if NonProd, 390 otherwise (Prod/Undetermined).
    """
    orphaned = not ami_exists and not volume_exists
    if orphaned:
        return 180 if snapshot_type == "Prod" else 90
    return 180 if snapshot_type == "NonProd" else 390
#regions=['us-east-1']
# Walk every snapshot in each region, classify it, and delete snapshots
# older than their computed retention period (orphaned snapshots only).
profile='account1'
regions=['us-east-1','ap-northeast-1','eu-west-1']
session= boto3.session.Session(profile_name=profile)
for region in regions:
    client = session.client('ec2',region_name=region)
    #response = client.describe_snapshots(OwnerIds=['xxxxx']) #account1
    response = client.describe_snapshots(OwnerIds=['xxxxxxx']) #account2
    for snapshot in response['Snapshots']:
        snapshot_name=''
        snapshot_description =''
        snapshot_billing_environment=''
        mark_for_deletion=False
        # Pull the Name and BILLING_ENVIRONMENT tags when present.
        if 'Tags' in snapshot:
            for tags in snapshot['Tags']:
                if tags["Key"] == 'Name':
                    snapshot_name = tags["Value"]
                if tags["Key"] == 'BILLING_ENVIRONMENT':
                    snapshot_billing_environment = tags["Value"]
        snapshot_description=snapshot['Description']
        # Prefer classifying via the source AMI when the description links one.
        ami_id=get_ami(snapshot_description)
        if ami_id:
            snapshot_type=check_ami(ami_id)
            if snapshot_type==False:
                # AMI no longer exists: classification is unknown.
                snapshot_type='Undetermined'
                ami_exists=False
            else:
                ami_exists=True
        else:
            snapshot_type=get_snapshot_type(snapshot_name,snapshot_description,snapshot_billing_environment)
            ami_exists=False
        volume_exists=check_volume(snapshot['VolumeId'])
        # NOTE(review): get_snapshot_age may return False on a ClientError,
        # which compares as 0 here and then never exceeds the retention.
        snapshot_age=get_snapshot_age(snapshot['SnapshotId'])
        retention_period=get_retention_period(ami_exists,volume_exists,snapshot_type)
        # Only delete snapshots past retention whose source AMI is gone.
        if snapshot_age > retention_period and not ami_exists:
            try:
                client.delete_snapshot(SnapshotId=snapshot['SnapshotId'])
                #mark_for_deletion=True
            except ClientError as e:
                # Snapshots still referenced by an AMI cannot be deleted.
                if 'InvalidSnapshot.InUse' in str(e):
                    print ("skipping this snapshot")
                    continue
        #print(region,snapshot['SnapshotId'],snapshot_name,str(volume_exists),str(ami_exists),str(snapshot_age),snapshot_type,str(retention_period),str(mark_for_deletion))
|
"""
Module to work with raw voltage traces. Spike sorting pre-processing functions.
"""
from pathlib import Path
import numpy as np
import scipy.signal
import scipy.stats
import pandas as pd
from joblib import Parallel, delayed, cpu_count
from iblutil.numerical import rcoeff
import spikeglx
import neuropixel
import neurodsp.fourier as fourier
import neurodsp.utils as utils
def agc(x, wl=.5, si=.002, epsilon=1e-8, gpu=False):
    """
    Automatic gain control
    w_agc, gain = agc(w, wl=.5, si=.002, epsilon=1e-8)
    such as w_agc * gain = w
    :param x: seismic array (sample last dimension)
    :param wl: window length (secs)
    :param si: sampling interval (secs)
    :param epsilon: whitening (useful mainly for synthetic data)
    :param gpu: bool
    :return: AGC data array, gain applied to data
    """
    if gpu:
        import cupy as gp
    else:
        gp = np
    # odd-length Hanning smoothing window spanning wl seconds
    ns_win = int(gp.round(wl / si / 2) * 2 + 1)
    w = gp.hanning(ns_win)
    w /= gp.sum(w)
    gain = fourier.convolve(gp.abs(x), w, mode='same', gpu=gpu)
    # whitening term keeps the gain strictly positive on live channels
    gain += (gp.sum(gain, axis=1) * epsilon / x.shape[-1])[:, gp.newaxis]
    # channels with an all-zero gain carry no signal: leave them untouched
    # (also use gp.sum so the reduction stays on-device on the GPU path)
    dead_channels = gp.sum(gain, axis=1) == 0
    x[~dead_channels, :] = x[~dead_channels, :] / gain[~dead_channels, :]
    if gpu:
        # Bug fix: the original returned (x * gain), which undoes the gain
        # correction on the GPU path and contradicts both the CPU branch and
        # the documented contract (w_agc * gain = w).
        return x.astype('float32'), gain.astype('float32')
    return x, gain
def fk(x, si=.002, dx=1, vbounds=None, btype='highpass', ntr_pad=0, ntr_tap=None, lagc=.5,
       collection=None, kfilt=None):
    """Frequency-wavenumber filter: filters apparent plane-waves velocity
    :param x: the input array to be filtered. dimension, the filtering is considering
     axis=0: spatial dimension, axis=1 temporal dimension. (ntraces, ns)
    :param si: sampling interval (secs)
    :param dx: spatial interval (usually meters)
    :param vbounds: velocity high pass [v1, v2], cosine taper from 0 to 1 between v1 and v2
    :param btype: {'lowpass', 'highpass'}, velocity filter : defaults to highpass
    :param ntr_pad: padding will add ntr_padd mirrored traces to each side
    :param ntr_tap: taper (if None, set to ntr_pad)
    :param lagc: length of agc in seconds. If set to None or 0, no agc
    :param kfilt: optional (None) if kfilter is applied, parameters as dict (bounds are in m-1
     according to the dx parameter) kfilt = {'bounds': [0.05, 0.1], 'btype', 'highpass'}
     (note: this parameter shadows the module-level kfilt function inside this scope)
    :param collection: vector length ntraces. Each unique value set of traces is a collection
     on which the FK filter will run separately (shot gaters, receiver gathers)
    :return:
    """
    if collection is not None:
        # run the filter independently on each collection of traces
        xout = np.zeros_like(x)
        for c in np.unique(collection):
            sel = collection == c
            # Bug fix: btype and kfilt were dropped from the recursive call,
            # silently resetting them to their defaults per sub-collection.
            xout[sel, :] = fk(x[sel, :], si=si, dx=dx, vbounds=vbounds, btype=btype,
                              ntr_pad=ntr_pad, ntr_tap=ntr_tap, lagc=lagc,
                              collection=None, kfilt=kfilt)
        return xout
    assert vbounds
    nx, nt = x.shape
    # lateral padding left and right
    ntr_pad = int(ntr_pad)
    ntr_tap = ntr_pad if ntr_tap is None else ntr_tap
    nxp = nx + ntr_pad * 2
    # compute frequency wavenumber scales and deduce the velocity filter
    fscale = fourier.fscale(nt, si)
    kscale = fourier.fscale(nxp, dx)
    kscale[0] = 1e-6  # avoid a division by zero at k=0
    v = fscale[np.newaxis, :] / kscale[:, np.newaxis]
    if btype.lower() in ['highpass', 'hp']:
        fk_att = fourier.fcn_cosine(vbounds)(np.abs(v))
    elif btype.lower() in ['lowpass', 'lp']:
        fk_att = (1 - fourier.fcn_cosine(vbounds)(np.abs(v)))
    # if a k-filter is also provided, apply it
    if kfilt is not None:
        katt = fourier._freq_vector(np.abs(kscale), kfilt['bounds'], typ=kfilt['btype'])
        fk_att *= katt[:, np.newaxis]
    # import matplotlib.pyplot as plt
    # plt.imshow(np.fft.fftshift(np.abs(v), axes=0).T, aspect='auto', vmin=0, vmax=1e5,
    #            extent=[np.min(kscale), np.max(kscale), 0, np.max(fscale) * 2])
    # plt.imshow(np.fft.fftshift(np.abs(fk_att), axes=0).T, aspect='auto', vmin=0, vmax=1,
    #            extent=[np.min(kscale), np.max(kscale), 0, np.max(fscale) * 2])
    # apply the attenuation in fk-domain
    if not lagc:
        xf = np.copy(x)
        gain = 1
    else:
        xf, gain = agc(x, wl=lagc, si=si)
    if ntr_pad > 0:
        # pad the array with a mirrored version of itself and apply a cosine taper
        xf = np.r_[np.flipud(xf[:ntr_pad]), xf, np.flipud(xf[-ntr_pad:])]
    if ntr_tap > 0:
        taper = fourier.fcn_cosine([0, ntr_tap])(np.arange(nxp))  # taper up
        taper *= 1 - fourier.fcn_cosine([nxp - ntr_tap, nxp])(np.arange(nxp))  # taper down
        xf = xf * taper[:, np.newaxis]
    xf = np.real(np.fft.ifft2(fk_att * np.fft.fft2(xf)))
    if ntr_pad > 0:
        xf = xf[ntr_pad:-ntr_pad, :]
    # undo the agc gain so the output amplitude matches the input
    return xf * gain
def car(x, collection=None, lagc=300, butter_kwargs=None, **kwargs):
    """
    Applies common average referencing with optional automatic gain control
    :param x: the input array to be filtered. dimension, the filtering is considering
     axis=0: spatial dimension, axis=1 temporal dimension. (ntraces, ns)
    :param collection: vector length ntraces; each unique value is processed separately
    :param lagc: window size for time domain automatic gain control (no agc otherwise)
    :param butter_kwargs: filtering parameters: defaults: {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
    :return:
    """
    if butter_kwargs is None:
        butter_kwargs = {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
    if collection is not None:
        xout = np.zeros_like(x)
        for c in np.unique(collection):
            sel = collection == c
            # Bug fix: the original recursed into kfilt() here, applying a
            # spatial butterworth filter instead of common average
            # referencing; recurse into car() itself.
            xout[sel, :] = car(x=x[sel, :], collection=None, lagc=lagc,
                               butter_kwargs=butter_kwargs)
        return xout
    # apply agc and keep the gain in handy
    if not lagc:
        xf = np.copy(x)
        gain = 1
    else:
        xf, gain = agc(x, wl=lagc, si=1.0)
    # apply CAR and then un-apply the gain
    xf = xf - np.median(xf, axis=0)
    return xf * gain
def kfilt(x, collection=None, ntr_pad=0, ntr_tap=None, lagc=300, butter_kwargs=None, gpu=False):
    """
    Applies a butterworth filter on the 0-axis with tapering / padding
    :param x: the input array to be filtered. dimension, the filtering is considering
     axis=0: spatial dimension, axis=1 temporal dimension. (ntraces, ns)
    :param collection: vector length ntraces; each unique value is processed separately
    :param ntr_pad: traces added to each side (mirrored)
    :param ntr_tap: n traces for apodizatin on each side
    :param lagc: window size for time domain automatic gain control (no agc otherwise)
    :param butter_kwargs: filtering parameters: defaults: {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
    :param gpu: bool
    :return:
    """
    if gpu:
        import cupy as gp
    else:
        gp = np
    if butter_kwargs is None:
        butter_kwargs = {'N': 3, 'Wn': 0.1, 'btype': 'highpass'}
    if collection is not None:
        # process each collection of traces independently
        xout = gp.zeros_like(x)
        for c in gp.unique(collection):
            sel = collection == c
            # NOTE(review): this recursion forwards only butter_kwargs and
            # forces ntr_pad=0/ntr_tap=None; lagc and gpu fall back to their
            # defaults — confirm this is intended.
            xout[sel, :] = kfilt(x=x[sel, :], ntr_pad=0, ntr_tap=None, collection=None,
                                 butter_kwargs=butter_kwargs)
        return xout
    nx, nt = x.shape
    # lateral padding left and right
    ntr_pad = int(ntr_pad)
    ntr_tap = ntr_pad if ntr_tap is None else ntr_tap
    nxp = nx + ntr_pad * 2
    # apply agc and keep the gain in handy
    if not lagc:
        xf = gp.copy(x)
        gain = 1
    else:
        xf, gain = agc(x, wl=lagc, si=1.0, gpu=gpu)
    if ntr_pad > 0:
        # pad the array with a mirrored version of itself and apply a cosine taper
        xf = gp.r_[gp.flipud(xf[:ntr_pad]), xf, gp.flipud(xf[-ntr_pad:])]
    if ntr_tap > 0:
        taper = fourier.fcn_cosine([0, ntr_tap], gpu=gpu)(gp.arange(nxp))  # taper up
        taper *= 1 - fourier.fcn_cosine([nxp - ntr_tap, nxp], gpu=gpu)(gp.arange(nxp))  # taper down
        xf = xf * taper[:, gp.newaxis]
    # zero-phase spatial butterworth filter (forward-backward)
    sos = scipy.signal.butter(**butter_kwargs, output='sos')
    if gpu:
        from .filter_gpu import sosfiltfilt_gpu
        xf = sosfiltfilt_gpu(sos, xf, axis=0)
    else:
        xf = scipy.signal.sosfiltfilt(sos, xf, axis=0)
    if ntr_pad > 0:
        # strip the mirrored padding back off
        xf = xf[ntr_pad:-ntr_pad, :]
    # undo the agc gain
    return xf * gain
def interpolate_bad_channels(data, channel_labels=None, x=None, y=None, p=1.3, kriging_distance_um=20, gpu=False):
    """
    Interpolate the channel labeled as bad channels using linear interpolation.
    The weights applied to neighbouring channels come from an exponential decay function
    :param data: (nc, ns) np.ndarray
    :param channel_labels: (nc) np.ndarray: 0: channel is good, 1: dead, 2:noisy, 3: out of the brain
    :param x: channel x-coordinates, np.ndarray
    :param y: channel y-coordinates, np.ndarray
    :param p: exponent of the decay function applied to the distance
    :param kriging_distance_um: distance scale (um) of the decay function
    :param gpu: bool
    :return: data with bad channels replaced in-place
    """
    if gpu:
        import cupy as gp
    else:
        gp = np
    # from ibllib.plots.figures import ephys_bad_channels
    # ephys_bad_channels(x, 30000, channel_labels[0], channel_labels[1])
    # we interpolate only noisy channels or dead channels (0: good), out of the brain channels are left
    bad_channels = gp.where(np.logical_or(channel_labels == 1, channel_labels == 2))[0]
    for i in bad_channels:
        # compute the weights to apply to neighbouring traces
        # (complex abs yields the Euclidean distance from channel i)
        offset = gp.abs(x - x[i] + 1j * (y - y[i]))
        weights = gp.exp(-(offset / kriging_distance_um) ** p)
        # never draw from other bad channels, and drop negligible weights
        weights[bad_channels] = 0
        weights[weights < 0.005] = 0
        weights = weights / gp.sum(weights)
        # NOTE(review): the 0.005 threshold is re-applied here after
        # normalisation — confirm the double thresholding is intended.
        imult = gp.where(weights > 0.005)[0]
        if imult.size == 0:
            # no usable neighbour: zero the channel out
            data[i, :] = 0
            continue
        # weighted average of the neighbouring traces
        data[i, :] = gp.matmul(weights[imult], data[imult, :])
    # from viewephys.gui import viewephys
    # f = viewephys(data.T, fs=1/30, h=h, title='interp2')
    return data
def _get_destripe_parameters(fs, butter_kwargs, k_kwargs, k_filter):
"""gets the default params for destripe. This is used for both the destripe fcn on a
numpy array and the function that actuates on a cbin file"""
if butter_kwargs is None:
butter_kwargs = {'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'}
if k_kwargs is None:
lagc = None if fs < 3000 else int(fs / 10)
k_kwargs = {'ntr_pad': 60, 'ntr_tap': 0, 'lagc': lagc,
'butter_kwargs': {'N': 3, 'Wn': 0.01, 'btype': 'highpass'}}
if k_filter:
spatial_fcn = lambda dat: kfilt(dat, **k_kwargs) # noqa
else:
spatial_fcn = lambda dat: car(dat, **k_kwargs) # noqa
return butter_kwargs, k_kwargs, spatial_fcn
def destripe(x, fs, h=None, neuropixel_version=1, butter_kwargs=None, k_kwargs=None, channel_labels=None, k_filter=True):
    """Super Car (super slow also...) - far from being set in stone but a good workflow example
    :param x: demultiplexed array (nc, ns)
    :param fs: sampling frequency
    :param h: (optional) neuropixel trace header; derived from neuropixel_version when None
    :param neuropixel_version (optional): 1 or 2. Useful for the ADC shift correction. If None,
     no correction is applied
    :param channel_labels:
     None: (default) keep all channels
     OR (recommended to pre-compute)
     index array for the first axis of x indicating the selected traces.
     On a full workflow, one should scan sparingly the full file to get a robust estimate of the
     selection. If None, and estimation is done using only the current batch is provided for
     convenience but should be avoided in production.
     OR (only for quick display or as an example)
     True: deduces the bad channels from the data provided
    :param butter_kwargs: (optional, None) butterworth params, see the code for the defaults dict
    :param k_kwargs: (optional, None) K-filter params, see the code for the defaults dict
     can also be set to 'car', in which case the median accross channels will be subtracted
    :param k_filter (True): applies k-filter by default, otherwise, apply CAR.
    :return: x, filtered array
    """
    butter_kwargs, k_kwargs, spatial_fcn = _get_destripe_parameters(fs, butter_kwargs, k_kwargs, k_filter)
    if h is None:
        h = neuropixel.trace_header(version=neuropixel_version)
    if channel_labels is True:
        # estimate bad channels from this batch only (see docstring caveat)
        channel_labels, _ = detect_bad_channels(x, fs)
    # butterworth
    sos = scipy.signal.butter(**butter_kwargs, output='sos')
    x = scipy.signal.sosfiltfilt(sos, x)
    # channel interpolation
    # apply ADC shift
    if neuropixel_version is not None:
        x = fourier.fshift(x, h['sample_shift'], axis=1)
    # apply spatial filter only on channels that are inside of the brain
    if channel_labels is not None:
        x = interpolate_bad_channels(x, channel_labels, h['x'], h['y'])
        inside_brain = np.where(channel_labels != 3)[0]
        x[inside_brain, :] = spatial_fcn(x[inside_brain, :])  # apply the k-filter
    else:
        x = spatial_fcn(x)
    return x
def destripe_lfp(x, fs, channel_labels=None, **kwargs):
    """
    Destripe the LFP band: thin wrapper around destripe() that forces a
    2 Hz high-pass butterworth and median CAR instead of the k-filter.
    See the destripe function for the full documentation.
    :param x: demultiplexed array (nc, ns)
    :param fs: sampling frequency
    :param channel_labels: optional; True auto-detects bad channels
    :return: filtered array
    """
    lfp_butter = {'N': 3, 'Wn': 2 / fs * 2, 'btype': 'highpass'}
    kwargs.update(butter_kwargs=lfp_butter, k_filter=False)
    if channel_labels is True:
        labels, _ = detect_bad_channels(x, fs=fs, psd_hf_threshold=1.4)
        kwargs['channel_labels'] = labels
    return destripe(x, fs, **kwargs)
def decompress_destripe_cbin(sr_file, output_file=None, h=None, wrot=None, append=False, nc_out=None, butter_kwargs=None,
                             dtype=np.int16, ns2add=0, nbatch=None, nprocesses=None, compute_rms=True, reject_channels=True,
                             k_kwargs=None, k_filter=True, reader_kwargs=None, output_qc_path=None):
    """
    From a spikeglx Reader object, decompresses and apply ADC.
    Saves output as a flat binary file in int16
    Production version with optimized FFTs - requires pyfftw

    :param sr_file: path to the compressed binary file, readable by spikeglx.Reader
    :param output_file: (optional, defaults to .bin extension of the compressed bin file)
    :param h: (optional) neuropixel trace header. Dictionary with key 'sample_shift'
    :param wrot: (optional) whitening matrix [nc x nc] or amplitude scalar to apply to the output
    :param append: (optional, False) for chronic recordings, append to end of file
    :param nc_out: (optional, True) saves non selected channels (synchronisation trace) in output
    :param butter_kwargs: (optional) butterworth filter parameters, defaults to
     {'N': 3, 'Wn': 300 / sr.fs * 2, 'btype': 'highpass'}
    :param dtype: (optional, np.int16) output sample format
    :param ns2add: (optional) for kilosort, adds padding samples at the end of the file so the total
     number of samples is a multiple of the batchsize
    :param nbatch: (optional) batch size
    :param nprocesses: (optional) number of parallel processes to run, defaults to number of processes detected with joblib
    :param compute_rms: (True) computes the rms of the AP band and saves it alongside the output
    :param reject_channels: (True) detects noisy or bad channels and interpolate them. Channels outside of the brain
     (label 3) are left untouched
    :param k_kwargs: (None) arguments for the kfilter function
    :param k_filter: (True) Performs a k-filter - if False will do median common average referencing
    :param reader_kwargs: (None) optional arguments for the spikeglx Reader instance
    :param output_qc_path: (None) if specified, will save the QC rms in a different location than the output
    :return:
    """
    import pyfftw
    SAMPLES_TAPER = 1024
    NBATCH = nbatch or 65536
    # handles input parameters
    reader_kwargs = {} if reader_kwargs is None else reader_kwargs
    sr = spikeglx.Reader(sr_file, open=True, **reader_kwargs)
    if reject_channels: # get bad channels if option is on
        channel_labels = detect_bad_channels_cbin(sr)
    assert isinstance(sr_file, str) or isinstance(sr_file, Path)
    butter_kwargs, k_kwargs, spatial_fcn = _get_destripe_parameters(sr.fs, butter_kwargs, k_kwargs, k_filter)
    h = sr.geometry if h is None else h
    ncv = h['sample_shift'].size # number of channels
    output_file = sr.file_bin.with_suffix('.bin') if output_file is None else Path(output_file)
    assert output_file != sr.file_bin
    # cosine taper applied at both edges of each batch so overlapping batches blend smoothly
    taper = np.r_[0, scipy.signal.windows.cosine((SAMPLES_TAPER - 1) * 2), 0]
    # create the FFT stencils
    nc_out = nc_out or sr.nc
    # compute LP filter coefficients
    sos = scipy.signal.butter(**butter_kwargs, output='sos')
    nbytes = dtype(1).nbytes
    nprocesses = nprocesses or int(cpu_count() - cpu_count() / 4)
    win = pyfftw.empty_aligned((ncv, NBATCH), dtype='float32')
    WIN = pyfftw.empty_aligned((ncv, int(NBATCH / 2 + 1)), dtype='complex64')
    fft_object = pyfftw.FFTW(win, WIN, axes=(1,), direction='FFTW_FORWARD', threads=4)
    # precompute the per-channel phase-shift operator (ADC sample-shift correction in Fourier domain)
    dephas = np.zeros((ncv, NBATCH), dtype=np.float32)
    dephas[:, 1] = 1.
    DEPHAS = np.exp(1j * np.angle(fft_object(dephas)) * h['sample_shift'][:, np.newaxis])
    # if we want to compute the rms ap across the session
    if compute_rms:
        ap_rms_file = output_file.parent.joinpath('ap_rms.bin')
        ap_time_file = output_file.parent.joinpath('ap_time.bin')
        rms_nbytes = np.float32(1).nbytes
        if append:
            # resume: read existing rms/time files to get byte offsets and last timestamp
            rms_offset = Path(ap_rms_file).stat().st_size
            time_offset = Path(ap_time_file).stat().st_size
            with open(ap_time_file, 'rb') as tid:
                t = tid.read()
            time_data = np.frombuffer(t, dtype=np.float32)
            t0 = time_data[-1]
        else:
            rms_offset = 0
            time_offset = 0
            t0 = 0
            open(ap_rms_file, 'wb').close()
            open(ap_time_file, 'wb').close()
    if append:
        # need to find the end of the file and the offset
        offset = Path(output_file).stat().st_size
    else:
        offset = 0
        open(output_file, 'wb').close()
    # chunks to split the file into, dependent on number of parallel processes
    CHUNK_SIZE = int(sr.ns / nprocesses)

    def my_function(i_chunk, n_chunk):
        # worker: processes contiguous batches [first_s, max_s) and writes them
        # in place into the shared output file at the proper byte offset
        _sr = spikeglx.Reader(sr_file, **reader_kwargs)
        n_batch = int(np.ceil(i_chunk * CHUNK_SIZE / NBATCH))
        first_s = (NBATCH - SAMPLES_TAPER * 2) * n_batch
        # Find the maximum sample for each chunk
        max_s = _sr.ns if i_chunk == n_chunk - 1 else (i_chunk + 1) * CHUNK_SIZE
        # need to redefine this here to avoid 4 byte boundary error
        win = pyfftw.empty_aligned((ncv, NBATCH), dtype='float32')
        WIN = pyfftw.empty_aligned((ncv, int(NBATCH / 2 + 1)), dtype='complex64')
        fft_object = pyfftw.FFTW(win, WIN, axes=(1,), direction='FFTW_FORWARD', threads=4)
        ifft_object = pyfftw.FFTW(WIN, win, axes=(1,), direction='FFTW_BACKWARD', threads=4)
        fid = open(output_file, 'r+b')
        if i_chunk == 0:
            fid.seek(offset)
        else:
            fid.seek(offset + ((first_s + SAMPLES_TAPER) * nc_out * nbytes))
        if compute_rms:
            aid = open(ap_rms_file, 'r+b')
            tid = open(ap_time_file, 'r+b')
            if i_chunk == 0:
                aid.seek(rms_offset)
                tid.seek(time_offset)
            else:
                aid.seek(rms_offset + (n_batch * ncv * rms_nbytes))
                tid.seek(time_offset + (n_batch * rms_nbytes))
        while True:
            last_s = np.minimum(NBATCH + first_s, _sr.ns)
            # Apply tapers
            chunk = _sr[first_s:last_s, :ncv].T
            chunk[:, :SAMPLES_TAPER] *= taper[:SAMPLES_TAPER]
            chunk[:, -SAMPLES_TAPER:] *= taper[SAMPLES_TAPER:]
            # Apply filters
            chunk = scipy.signal.sosfiltfilt(sos, chunk)
            # Find the indices to save
            ind2save = [SAMPLES_TAPER, NBATCH - SAMPLES_TAPER]
            if last_s == _sr.ns:
                # for the last batch just use the normal fft as the stencil doesn't fit
                chunk = fourier.fshift(chunk, s=h['sample_shift'])
                ind2save[1] = NBATCH
            else:
                # apply precomputed fshift of the proper length
                chunk = ifft_object(fft_object(chunk) * DEPHAS)
            if first_s == 0:
                # for the first batch save the start with taper applied
                ind2save[0] = 0
            # interpolate missing traces after the low-cut filter it's important to leave the
            # channels outside of the brain outside of the computation
            if reject_channels:
                chunk = interpolate_bad_channels(chunk, channel_labels, h['x'], h['y'])
                inside_brain = np.where(channel_labels != 3)[0]
                chunk[inside_brain, :] = spatial_fcn(chunk[inside_brain, :]) # apply the k-filter / CAR
            else:
                chunk = spatial_fcn(chunk) # apply the k-filter / CAR
            # add back sync trace and save
            chunk = np.r_[chunk, _sr[first_s:last_s, ncv:].T].T
            # Compute rms - we get it before applying the whitening
            if compute_rms:
                ap_rms = utils.rms(chunk[:, :ncv], axis=0)
                ap_t = t0 + (first_s + (last_s - first_s - 1) / 2) / _sr.fs
                ap_rms.astype(np.float32).tofile(aid)
                ap_t.astype(np.float32).tofile(tid)
            # convert to normalised
            intnorm = 1 / _sr.sample2volts
            chunk = chunk[slice(*ind2save), :] * intnorm
            # apply the whitening matrix if necessary
            if wrot is not None:
                chunk[:, :ncv] = np.dot(chunk[:, :ncv], wrot)
            chunk[:, :nc_out].astype(dtype).tofile(fid)
            first_s += NBATCH - SAMPLES_TAPER * 2
            if last_s >= max_s:
                if last_s == _sr.ns:
                    if ns2add > 0:
                        # pad by repeating the last sample so total length is a multiple of the batch size
                        np.tile(chunk[-1, :nc_out].astype(dtype), (ns2add, 1)).tofile(fid)
                fid.close()
                if compute_rms:
                    aid.close()
                    tid.close()
                break
    _ = Parallel(n_jobs=nprocesses)(delayed(my_function)(i, nprocesses) for i in range(nprocesses))
    sr.close()
    # Here convert the ap_rms bin files to the ibl format and save
    if compute_rms:
        with open(ap_rms_file, 'rb') as aid, open(ap_time_file, 'rb') as tid:
            rms_data = aid.read()
            time_data = tid.read()
        time_data = np.frombuffer(time_data, dtype=np.float32)
        rms_data = np.frombuffer(rms_data, dtype=np.float32)
        assert (rms_data.shape[0] == time_data.shape[0] * ncv)
        rms_data = rms_data.reshape(time_data.shape[0], ncv)
        output_qc_path = output_qc_path or output_file.parent
        np.save(output_qc_path.joinpath('_iblqc_ephysTimeRmsAP.rms.npy'), rms_data)
        np.save(output_qc_path.joinpath('_iblqc_ephysTimeRmsAP.timestamps.npy'), time_data)
def detect_bad_channels(raw, fs, similarity_threshold=(-0.5, 1), psd_hf_threshold=None):
    """
    Bad channels detection for Neuropixel probes
    Labels channels
     0: all clear
     1: dead low coherence / amplitude
     2: noisy
     3: outside of the brain
    :param raw: [nc, ns] raw voltage traces (channels x samples)
    :param fs: sampling frequency (Hz)
    :param similarity_threshold: (low, high) bounds on the high-pass cross-channel
     similarity; below the low bound a channel is flagged dead, above the high bound noisy
    :param psd_hf_threshold: high-frequency PSD threshold above which a channel is
     flagged noisy; auto-selected from fs when None
    :return: labels (numpy vector [nc]), xfeats: dictionary of features [nc]
    """
    def rneighbours(raw, n=1):  # noqa
        """
        Computes Pearson correlation with the sum of neighbouring traces
        NOTE(review): defined but not used anywhere in this function
        :param raw: nc, ns
        :param n: number of neighbours on each side to sum
        :return:
        """
        nc = raw.shape[0]
        mixer = np.triu(np.ones((nc, nc)), 1) - np.triu(np.ones((nc, nc)), 1 + n)
        mixer += np.tril(np.ones((nc, nc)), -1) - np.tril(np.ones((nc, nc)), - n - 1)
        r = rcoeff(raw, np.matmul(raw.T, mixer).T)
        r[np.isnan(r)] = 0
        return r

    def detrend(x, nmed):
        """
        Subtract the trend from a vector
        The trend is a median filtered version of the said vector with tapering
        :param x: input vector
        :param nmed: number of points of the median filter
        :return: np.array
        """
        ntap = int(np.ceil(nmed / 2))
        # pad both ends with the edge value so the median filter has full support
        xf = np.r_[np.zeros(ntap) + x[0], x, np.zeros(ntap) + x[-1]]
        # assert np.all(xcorf[ntap:-ntap] == xcor)
        xf = scipy.signal.medfilt(xf, nmed)[ntap:-ntap]
        return x - xf

    def channels_similarity(raw, nmed=0):
        """
        Computes the similarity based on zero-lag crosscorrelation of each channel with the median
        trace referencing
        :param raw: [nc, ns]
        :param nmed: optional median-filter length for detrending the similarity vector
        :return:
        """
        def fxcor(x, y):
            # zero-lag cross-correlation via FFT
            return scipy.fft.irfft(scipy.fft.rfft(x) * np.conj(scipy.fft.rfft(y)), n=raw.shape[-1])

        def nxcor(x, ref):
            # cross-correlation normalised by the reference autocorrelation peak
            ref = ref - np.mean(ref)
            apeak = fxcor(ref, ref)[0]
            x = x - np.mean(x, axis=-1)[:, np.newaxis] # remove DC component
            return fxcor(x, ref)[:, 0] / apeak

        ref = np.median(raw, axis=0)
        xcor = nxcor(raw, ref)
        if nmed > 0:
            xcor = detrend(xcor, nmed) + 1
        return xcor
    nc, _ = raw.shape
    raw = raw - np.mean(raw, axis=-1)[:, np.newaxis] # removes DC offset
    xcor = channels_similarity(raw)
    fscale, psd = scipy.signal.welch(raw * 1e6, fs=fs) # units; uV ** 2 / Hz
    if psd_hf_threshold is None:
        # the LFP band data is obviously much stronger so auto-adjust the default threshold
        psd_hf_threshold = 1.4 if fs < 5000 else 0.02
    sos_hp = scipy.signal.butter(**{'N': 3, 'Wn': 300 / fs * 2, 'btype': 'highpass'}, output='sos')
    hf = scipy.signal.sosfiltfilt(sos_hp, raw)
    xcorf = channels_similarity(hf)
    xfeats = ({
        'ind': np.arange(nc),
        'rms_raw': utils.rms(raw), # very similar to the rms after butterworth filter
        'xcor_hf': detrend(xcor, 11),
        'xcor_lf': xcorf - detrend(xcorf, 11) - 1,
        'psd_hf': np.mean(psd[:, fscale > (fs / 2 * 0.8)], axis=-1), # 80% nyquists
    })
    # make recommendation
    ichannels = np.zeros(nc)
    idead = np.where(similarity_threshold[0] > xfeats['xcor_hf'])[0]
    inoisy = np.where(np.logical_or(xfeats['psd_hf'] > psd_hf_threshold, xfeats['xcor_hf'] > similarity_threshold[1]))[0]
    # the channels outside of the brains are the contiguous channels below the threshold on the trend coherency
    ioutside = np.where(xfeats['xcor_lf'] < -0.75)[0]
    if ioutside.size > 0 and ioutside[-1] == (nc - 1):
        # keep only the contiguous run of low-coherence channels ending at the probe tip
        a = np.cumsum(np.r_[0, np.diff(ioutside) - 1])
        ioutside = ioutside[a == np.max(a)]
        ichannels[ioutside] = 3
    # indices
    ichannels[idead] = 1
    ichannels[inoisy] = 2
    # from ibllib.plots.figures import ephys_bad_channels
    # ephys_bad_channels(x, 30000, ichannels, xfeats)
    return ichannels, xfeats
def detect_bad_channels_cbin(bin_file, n_batches=10, batch_duration=0.3, display=False):
    """
    Runs an ap-binary file scan to automatically detect faulty channels
    :param bin_file: full file path to the binary or compressed binary file from spikeglx,
     or an already opened spikeglx.Reader instance
    :param n_batches: number of batches throughout the file (defaults to 10)
    :param batch_duration: batch length in seconds, defaults to 0.3
    :param display: if True will display a figure with features and an excerpt of the raw data
    :return: channel_labels: nc int array with 0:ok, 1:dead, 2:high noise, 3:outside of the brain
    """
    sr = bin_file if isinstance(bin_file, spikeglx.Reader) else spikeglx.Reader(bin_file)
    nc = sr.nc - sr.nsync  # number of recording channels, sync traces excluded
    channel_labels = np.zeros((nc, n_batches))
    # loop over evenly spaced snippets of the file and take the mode of detections
    for i, t0 in enumerate(np.linspace(0, sr.rl - batch_duration, n_batches)):
        sl = slice(int(t0 * sr.fs), int((t0 + batch_duration) * sr.fs))
        channel_labels[:, i], _xfeats = detect_bad_channels(sr[sl, :nc].T, fs=sr.fs)
        if i == 0:  # init the features dictionary if necessary
            xfeats = {k: np.zeros((nc, n_batches)) for k in _xfeats}
        for k in xfeats:
            xfeats[k][:, i] = _xfeats[k]
    # the features are averaged so there may be a discrepancy between the mode and applying
    # the thresholds to the average of the features - the goal of those features is for display only
    xfeats_med = {k: np.median(xfeats[k], axis=-1) for k in xfeats}
    channel_flags, _ = scipy.stats.mode(channel_labels, axis=1)
    if display:
        # BUGFIX: was `sr[sl, :nc].TO`, a typo that raised AttributeError;
        # `.T` transposes the excerpt to the expected [nc, ns] layout
        raw = sr[sl, :nc].T
        from ibllib.plots.figures import ephys_bad_channels
        ephys_bad_channels(raw, sr.fs, channel_flags, xfeats_med)
    return channel_flags
def resample_denoise_lfp_cbin(lf_file, RESAMPLE_FACTOR=10, output=None):
    """
    Downsamples an LFP file and apply destriping
    ```
    nc = 384
    ns = int(lf_file_out.stat().st_size / nc / 4)
    sr_ = spikeglx.Reader(lf_file_out, nc=nc, fs=sr.fs / RESAMPLE_FACTOR, ns=ns, dtype=np.float32)
    ```
    :param lf_file: path to the LFP binary file (readable by spikeglx.Reader)
    :param RESAMPLE_FACTOR: integer decimation factor, defaults to 10
    :param output: Path of the float32 flat binary output, defaults to lf_resampled.bin
     next to the input file
    :return: None
    """
    output = output or Path(lf_file).parent.joinpath('lf_resampled.bin')
    sr = spikeglx.Reader(lf_file)
    wg = utils.WindowGenerator(ns=sr.ns, nswin=65536, overlap=1024)
    cflags = detect_bad_channels_cbin(lf_file)
    # PERF: the band-pass filter is identical for every window; design it once
    # outside of the loop instead of on every iteration
    butter_kwargs = {'N': 3, 'Wn': np.array([2, 200]) / sr.fs * 2, 'btype': 'bandpass'}
    sos = scipy.signal.butter(**butter_kwargs, output='sos')
    c = 0
    with open(output, 'wb') as f:
        for first, last in wg.firstlast:
            raw = sr[first:last, :-sr.nsync]
            raw = scipy.signal.sosfiltfilt(sos, raw, axis=0)
            destripe = destripe_lfp(raw.T, fs=sr.fs, channel_labels=cflags)
            # viewephys(raw.T, fs=sr.fs, title='raw')
            # viewephys(destripe, fs=sr.fs, title='destripe')
            rsamp = scipy.signal.decimate(destripe, RESAMPLE_FACTOR, axis=1, ftype='fir').T
            # viewephys(rsamp, fs=sr.fs / RESAMPLE_FACTOR, title='rsamp')
            # trim half of the (decimated) overlap on each interior window edge
            first_valid = 0 if first == 0 else int(wg.overlap / 2 / RESAMPLE_FACTOR)
            last_valid = rsamp.shape[0] if last == sr.ns else int(rsamp.shape[0] - wg.overlap / 2 / RESAMPLE_FACTOR)
            rsamp = rsamp[first_valid:last_valid, :]
            c += rsamp.shape[0]
            print(first, last, last - first, first_valid, last_valid, c)
            rsamp.astype(np.float32).tofile(f)
    # first, last = (500, 550)
    # viewephys(sr[int(first * sr.fs) : int(last * sr.fs), :-sr.nsync].T, sr.fs, title='orig')
    # viewephys(sr_[int(first * sr_.fs):int(last * sr_.fs), :].T, sr_.fs, title='rsamp')
def stack(data, word, fcn_agg=np.nanmean, header=None):
    """
    Stack numpy array traces according to the word vector
    :param data: (ntr, ns) numpy array of sample values
    :param word: (ntr) label according to which the traces will be aggregated (usually cdp)
    :param fcn_agg: aggregation function, defaults to np.nanmean but could be np.sum or np.median
    :param header: dictionary of vectors (ntr): header labels, will be aggregated as average
    :return: stack (ntr_stack, ns): aggregated numpy array
             header (ntr_stack): aggregated header. If no header is provided, fold of coverage
    """
    n_samples = data.shape[1]
    # one output trace per unique word; `inverse` maps each input trace to its group
    group, inverse, fold = np.unique(word, return_inverse=True, return_counts=True)
    out = np.zeros((group.size, n_samples), dtype=data.dtype)
    for igroup in range(group.size):
        out[igroup, :] = fcn_agg(data[inverse == igroup, :], axis=0)
    if header is None:
        # no header: return the fold of coverage instead
        hstack = fold
    else:
        # aggregate each header vector as the per-group mean using pandas
        header['stack_word'] = word
        grouped = pd.DataFrame(header).groupby('stack_word')
        aggregated = grouped.aggregate('mean').to_dict(orient='series')
        hstack = {key: series.values for key, series in aggregated.items()}
        hstack['fold'] = fold
    return out, hstack
def current_source_density(lfp, h, method='diff', sigma=1 / 3):
    """
    Compute the current source density (CSD) of a given LFP signal recorded on neuropixel 1 or 2
    :param lfp: LFP signal (n_channels, n_samples)
    :param h: trace header dictionary with channel coordinate vectors 'x', 'y', 'col', 'row'
    :param method: diff (straight double difference) or kcsd (needs the KCSD python package)
    :param sigma: conductivity, defaults to 1/3 S.m-1
    :return: csd (n_channels, n_samples) np.float64 array; channels that cannot be
     computed are left as np.nan
    """
    # BUGFIX/modernisation: np.NAN alias was removed in numpy 2.0, use np.nan
    csd = np.zeros(lfp.shape, dtype=np.float64) * np.nan
    xy = h['x'] + 1j * h['y']
    # process each probe column independently, traces sorted by depth (row)
    for col in np.unique(h['col']):
        ind = np.where(h['col'] == col)[0]
        isort = np.argsort(h['row'][ind])
        itr = ind[isort]
        dx = np.median(np.diff(np.abs(xy[itr])))
        if method == 'diff':
            # second spatial derivative, edge channels copied from their neighbour
            csd[itr[1:-1], :] = np.diff(lfp[itr, :].astype(np.float64), n=2, axis=0) / dx ** 2 * sigma
            csd[itr[0], :] = csd[itr[1], :]
            csd[itr[-1], :] = csd[itr[-2], :]
        elif method == 'kcsd':
            from kcsd import KCSD1D
            # here we could eventually expose the KCSD kwargs
            csd[itr, :] = KCSD1D(
                h['y'][itr, np.newaxis],
                lfp[itr, :],
                h=np.median(np.diff(h['y'][ind])),  # this seems to work well with the current intertrace
                sigma=sigma,
                xmin=np.min(h['y'][itr]),
                xmax=np.max(h['y'][itr]),
                gdx=np.ceil((np.max(h['y'][itr]) - np.min(h['y'][itr])) / itr.size),
                lambd=0.,
                R_init=5.,
                n_src_init=10000,
                src_type='gauss').values('CSD')
    return csd
|
#python3
import sys
def money_change_greedy_util(money, coins):
    """Count coins greedily: take as many of each denomination as possible.

    Assumes *coins* is ordered from largest to smallest. Note the greedy
    strategy is not guaranteed to be optimal for arbitrary coin systems.
    """
    total = 0
    for denom in coins:
        if money <= 0:
            break
        quotient, remainder = divmod(money, denom)
        total += quotient
        money = remainder
    return total
def money_change_greedy(money):
    """Greedy coin count for denominations 4, 3 and 1 (largest first)."""
    denominations = (4, 3, 1)
    return money_change_greedy_util(money, denominations)
def money_change_rec_util(money, coins):
    """Minimum number of coins to make *money*, by exhaustive recursion.

    Exponential time; kept as the reference implementation for stress testing
    the dynamic-programming version.
    """
    if money == 0:
        return 0
    best = 9999999999
    for denom in coins:
        if money < denom:
            continue
        candidate = money_change_rec_util(money - denom, coins) + 1
        best = min(best, candidate)
    return best
def money_change_rec(money):
    """Recursive (brute-force) minimum coin count for denominations 4, 3, 1."""
    denominations = (4, 3, 1)
    return money_change_rec_util(money, denominations)
def money_change_dynamic_util(money, coins):
    """Minimum number of coins to make *money* via bottom-up dynamic programming.

    Builds a table where entry i holds the optimal coin count for amount i;
    unreachable amounts keep the large sentinel value.
    """
    if money <= 0:
        return 0
    SENTINEL = 9999999999
    table = [0] + [SENTINEL] * money
    for amount in range(1, money + 1):
        for denom in coins:
            if denom > amount:
                continue
            candidate = table[amount - denom] + 1
            table[amount] = min(table[amount], candidate)
    return table[money]
def money_change_dynamic(money):
    """Dynamic-programming minimum coin count for denominations 1, 3 and 4."""
    denominations = (1, 3, 4)
    return money_change_dynamic_util(money, denominations)
if __name__ == "__main__":
    # test trigger: `-t` runs the stress test, comparing the brute-force
    # recursive implementation against the dynamic-programming one
    if(len(sys.argv) == 2):
        if(sys.argv[1] == '-t'):
            from Stress_Test import Test
            test = Test(test_func = money_change_rec,
                        solution_func = money_change_dynamic,
                        iterations = 100)
            test.run()
    else:
        # default mode: read an amount from stdin and print the minimum coin count
        size = int(input())
        print("{}".format(money_change_dynamic(size)))
'''
2
34
''' |
import unittest
from tdasm import Runtime
from sdl import Shader, StructArg, IntArg, Ray, Vector3
from renlgt.sphere import Sphere
from renlgt.hitpoint import HitPoint
from renlgt.shp_mgr import ShapeManager
from renlgt.linear import LinearIsect
class LinearIsectTests(unittest.TestCase):
    """Tests that the shader-compiled linear intersector agrees with the
    Python-side implementation for both intersection and visibility queries."""

    def test_linear(self):
        # Compare the hit point computed by the compiled `isect_scene` shader
        # against the Python-side LinearIsect.isect for the same ray.
        sphere = Sphere(Vector3(0.0, 0.0, 0.0), 2.0, 0)
        mgr = ShapeManager()
        mgr.add('sph1', sphere)
        sphere2 = Sphere(Vector3(0.0, 2.0, 0.0), 3.0, 0)
        mgr.add('sph2', sphere2)

        isector = LinearIsect(mgr)
        runtimes = [Runtime()]
        direction = Vector3(-1.0, -1.0, -1.0)
        direction.normalize()
        ray = Ray(Vector3(5.0, 5.0, 5.0), direction)

        # compile and prepare the intersector before the dependent shader
        isector.compile()
        isector.prepare(runtimes)

        code = """
min_dist = 99999.0
p1 = isect_scene(ray, hitpoint, min_dist)
        """
        direction = Vector3(-1.0, -1.0, -1.0)
        direction.normalize()
        ray = Ray(Vector3(5.0, 5.0, 5.0), direction)
        hitpoint = HitPoint(0.0, Vector3(0.0, 0.0, 0.0),
                            Vector3(0.0, 0.0, 0.0), 6, 0.0, 0.0)
        r_arg = StructArg('ray', ray)
        harg = StructArg('hitpoint', hitpoint)
        p1 = IntArg('p1', 6)
        args = [r_arg, harg, p1]
        shader = Shader(code=code, args=args)
        shader.compile([isector.shader])
        shader.prepare(runtimes)

        # reference result computed in Python
        hp2 = isector.isect(ray)

        shader.execute()
        hitpoint = shader.get_value('hitpoint')
        # distance, material and geometry of the hit must match to float precision
        self.assertAlmostEqual(hp2.t, hitpoint.t, places=5)
        self.assertEqual(hp2.mat_idx, hitpoint.mat_idx)
        n1 = hp2.normal
        n2 = hitpoint.normal
        self.assertAlmostEqual(n1.x, n2.x)
        self.assertAlmostEqual(n1.y, n2.y, places=6)
        self.assertAlmostEqual(n1.z, n2.z)
        self.assertAlmostEqual(hitpoint.hit.x, hp2.hit.x, places=6)
        self.assertAlmostEqual(hitpoint.hit.y, hp2.hit.y, places=6)
        self.assertAlmostEqual(hitpoint.hit.z, hp2.hit.z, places=6)
        result = shader.get_value('p1')
        # p1 == 1 signals that the shader registered an intersection
        self.assertEqual(result, 1)

    def test_linear_visiblity(self):
        # Compare the compiled `visibility` shader against the Python-side
        # LinearIsect.visibility for the same pair of points.
        sphere = Sphere(Vector3(0.0, 0.0, 0.0), 2.0, 0)
        mgr = ShapeManager()
        mgr.add('sph1', sphere)
        sphere2 = Sphere(Vector3(0.0, 2.0, 0.0), 3.0, 0)
        mgr.add('sph2', sphere2)

        isector = LinearIsect(mgr)
        runtimes = [Runtime()]
        direction = Vector3(-1.0, -1.0, -1.0)
        direction.normalize()
        ray = Ray(Vector3(5.0, 5.0, 5.0), direction)

        isector.compile()
        isector.prepare(runtimes)

        code = """
p1 = (9, 8, 7)
p2 = (-2, -5, -3)
ret = visibility(p1, p2)
        """
        ret = IntArg('ret', 6)
        args = [ret]
        shader = Shader(code=code, args=args)
        shader.compile([isector.visible_shader])
        shader.prepare(runtimes)

        p1 = Vector3(9.0, 8.0, 7.0)
        p2 = Vector3(-2.0, -5.0, -3.0)
        # reference result computed in Python (rebinds `ret` from IntArg to bool)
        ret = isector.visibility(p1, p2)
        shader.execute()
        ret_s = shader.get_value('ret')
        # shader returns 1/0, Python returns True/False - they must agree
        if ret is True and ret_s == 0:
            raise ValueError("Linear visiblity is calculated wrong", ret, ret_s)
        if ret is False and ret_s == 1:
            raise ValueError("Linear visiblity is calculated wrong", ret, ret_s)
if __name__ == "__main__":
    # run this test module through unittest's CLI when executed directly
    unittest.main()
|
from django.db import models
from django.contrib.auth.models import AbstractUser
# Create your models here.
class Author(models.Model):
    """Blog author with a display name, contact email and free-form bio."""
    name = models.CharField(max_length=20)
    email = models.EmailField()
    description = models.TextField()

    def __str__(self):
        return self.name
class Tag(models.Model):
    """Blog category tag (original docstring: 博客分类 - "blog category")."""
    tag_name = models.CharField(max_length=20)
    create_time = models.DateTimeField(auto_now_add=True)  # set once on creation

    def __str__(self):
        return self.tag_name
# class Category(models.Model):
# name = models.CharField(max_length=30, verbose_name='分类名称')
# index = models.IntegerField(default=999, verbose_name='分类的排序')
#
# class Meta:
# verbose_name = '分类'
# verbose_name_plural = verbose_name
# ordering = ['index', 'id']
#
# def __str__(self):
# return self.name
class Blog(models.Model):
    """A blog post: title, author, tags, body text and timestamps."""
    caption = models.CharField(max_length=50)
    # many-to-one foreign key to the Author model (the original comment called this
    # "one-to-one", but Django's ForeignKey is a many-to-one relation)
    author = models.ForeignKey(Author, on_delete=models.DO_NOTHING)
    tags = models.ManyToManyField(Tag, blank=True)  # many-to-many link to the Tag model above
    content = models.TextField()  # long-form body text
    publish_time = models.DateTimeField(auto_now_add=True)  # set automatically on creation
    update_time = models.DateTimeField(auto_now=True)  # refreshed automatically on every save

    class Meta:
        # newest posts first
        ordering = ['-publish_time']

    def __str__(self):
        return self.caption
|
from django.contrib import admin
from login.models import UserProfile
@admin.register(UserProfile)
class UserProfileAdmin(admin.ModelAdmin):
    """Admin configuration for UserProfile: searchable by username, user link read-only."""
    list_display = ("pk", "user")
    list_display_links = ("pk", "user")
    search_fields = ("user__username",)  # search through the related auth user's username
    readonly_fields = ("user",)  # the linked user cannot be reassigned from the admin
|
import webbrowser
class Movie():
    """Blueprint for one movie entry on the generated web page.

    Instances are created in entertainment_center.py and passed to
    fresh_tomatoes.py, which renders them into the movie gallery. Each
    instance stores five display attributes: title, storyline, poster
    image URL, trailer URL and rating.
    """

    # Standard MPAA ratings, shared by every instance.
    VALID_RATINGS = ["G", "PG", "PG-13", "R"]

    def __init__(self, movie_title, movie_storyline, poster_image, trailer_youtube, rating):
        """Store the display data for a single movie."""
        self.title = movie_title
        self.storyline = movie_storyline
        self.poster_image_url = poster_image
        self.trailer_youtube_url = trailer_youtube
        self.rating = rating

    def play_trailer(self):
        """Open this movie's trailer URL in the default web browser."""
        webbrowser.open(self.trailer_youtube_url)
|
import os
import asyncio # noqa: F401
import discord
import logging
from discord.ext import commands
from cogs.utils.dataIO import dataIO
from cogs.utils import checks
import json
import random
class Quote:
    """Simple quote cog

    Saves a message as a quote when a user reacts with the floppy-disk emoji,
    recalls quotes with `!quote`, and deletes them either by admin command or
    by collecting enough wastebasket reactions. Quotes are persisted to
    `quotes.json` as a dict keyed by author id, then by message id.
    All user-facing messages are in Austrian German dialect.
    """
    __author__ = "pitikay"
    __version__ = "0.1"

    def __init__(self, bot):
        self.bot = bot
        try:
            with open('quotes.json', 'r') as f:
                self.quotes = json.load(f)
        except FileNotFoundError:
            # first run: start with an empty quote store
            self.quotes = {}

    async def on_reaction_add(self, reaction, user):
        # floppy disk saves a quote (only on the first such reaction);
        # wastebasket deletes it at 5 votes, or immediately from a moderator
        if reaction.emoji == "💾" and reaction.count == 1:
            await self.add_quote(reaction.message, user)
        if reaction.emoji == u"\U0001F5D1":
            if reaction.count == 5:
                await self.votedel_quote(reaction.message)
            elif user.server_permissions.manage_roles:
                await self.votedel_quote(reaction.message)

    async def add_quote(self, message, user):
        """Store *message* as a quote added by *user*, rejecting empty content and duplicates."""
        image = False
        if(not message.clean_content):
            # no text: accept the message only if it carries an image attachment
            if(message.attachments and message.attachments[0].get("width")):
                image = True
            else:
                await self.bot.send_message(message.channel, "Heast, leere Zitate gehn net!")
                return
        quote = self.quote_from_message(message, user, image)
        aid = quote["aid"]
        qid = quote["qid"]
        if(not self.quotes.get(aid)):
            self.quotes[aid] = {}
        if(self.quotes[aid].get(qid)):
            # duplicate: this message was already quoted
            await self.bot.send_message(message.channel, "Oida des hob i scho gspeichert!")
            return
        self.quotes[aid][qid] = quote
        self.store_quotes()
        await self.send_quote_to_channel(quote, message.channel)

    async def votedel_quote(self, message):
        """Delete the quote referenced by the footer of a quote embed, if any."""
        if(not message.embeds):
            return
        footer = message.embeds[0].get("footer")
        if(not footer or not footer.get("text")):
            return
        if(not (len(footer["text"].split()) > 1)):
            return
        # the second footer word is the quote id (see gen_embed)
        qid = footer["text"].split()[1]
        if(not qid.isdigit()):
            return
        await self.del_quote_by_id(qid, message.channel)

    async def send_quote_to_channel(self, quote, channel):
        """Render *quote* as an embed and post it to *channel*."""
        em = self.gen_embed(quote, channel)
        await self.bot.send_message(channel, embed=em)

    @commands.command(name="quote", pass_context=True)
    async def get_quote(self, ctx):
        """!quote [@mention | user id | quote id | author name] - post a (random) quote."""
        if(not self.quotes):
            await self.bot.send_message(ctx.message.channel, "I hob no kane Zitate gspeichert.")
            return
        authorId = ctx.message.clean_content.replace("!quote", "", 1).strip()
        #print(authorId)
        if(ctx.message.mentions):
            # mentioned user: pick a random quote of theirs
            author = random.choice(ctx.message.mentions).id
            if(not self.quotes.get(author)):
                await self.bot.send_message(ctx.message.channel, "Der hot no nix deppates gsogt.")
                return
        elif authorId != "":
            if authorId in self.quotes:
                author = authorId
            else:
                # search quotes for passed id
                for userId in self.quotes:
                    for quote in self.quotes[userId].values():
                        if quote["qid"] == authorId:
                            await self.send_quote_to_channel(quote, ctx.message.channel)
                            return
                # fall back to matching by author display name (case-insensitive)
                author = None
                for userId in self.quotes:
                    if authorId.lower() == list(self.quotes[userId].values())[0]["author"].lower():
                        author = userId
                        break
                if author is None:
                    await self.bot.send_message(ctx.message.channel, "I hob niemand mit dem Namen gfundn.")
                    return
        else:
            # no argument: random author
            author = random.choice(list(self.quotes.keys()))
        if(self.quotes[author].keys()):
            entry = random.choice(list(self.quotes[author].keys()))
            await self.send_quote_to_channel(self.quotes[author][entry], ctx.message.channel)
        else:
            await self.bot.send_message(ctx.message.channel, "I hob no kane Zitate gspeichert.")

    @checks.admin_or_permissions(manage_roles=True)
    @commands.command(name="delquote", pass_context=True)
    async def del_quote(self, ctx):
        """!delquote <quote id> - delete a quote (moderators only)."""
        message = ctx.message
        channel = message.channel
        qid = int(message.clean_content.replace("!delquote ", "", 1))
        await self.del_quote_by_id(qid, channel)

    async def del_quote_by_id(self, qid, channel):
        """Remove the quote with id *qid* from the store, pruning empty authors."""
        for author in self.quotes.keys():
            for q in self.quotes[author].keys():
                if q == str(qid):
                    self.quotes[author].pop(q)
                    if(not self.quotes[author].keys()):
                        self.quotes.pop(author)
                    await self.bot.send_message(channel, "Zitat is glöscht!")
                    self.store_quotes()
                    return
        await self.bot.send_message(channel, "Ka Zitat gfunden!")

    def gen_embed(self, quote, channel):
        """Build the discord Embed for a stored quote dict."""
        # prefer the member's current display name over the stored one
        member = discord.utils.find(lambda m: str(m.id) == quote.get("aid"), channel.server.members)
        if(member):
            author = member.display_name
        else:
            author = quote.get("author")
        content = quote.get("content")
        timestamp = quote.get("time")
        avatar = quote.get("avatar")
        adder = quote.get("adder")
        quote_id = quote.get("qid")
        if(quote.get("content")):
            em = discord.Embed(description=content,
                               color=discord.Color.purple())
        else:
            em = discord.Embed(color=discord.Color.purple())
        if(quote.get("image")):
            em.set_image(url=quote["image"])
        em.set_author(name='Zitat von {}'.format(author),
                      icon_url=avatar)
        # the footer embeds the quote id; votedel_quote parses it back out
        em.set_footer(text='Zitat {} hinzugfügt am {} UTC von {}'.format(quote_id, timestamp, adder))
        return em

    def quote_from_message(self, message, user, image=False):
        """Build the serialisable quote dict from a discord message."""
        quote = {}
        quote["author"] = message.author.display_name
        quote["aid"] = message.author.id
        quote["adder"] = user.name
        quote["content"] = message.clean_content
        quote["qid"] = str(message.id)
        quote["time"] = message.timestamp.strftime('%Y-%m-%d %H:%M')
        author = message.author
        quote["avatar"] = author.avatar_url if author.avatar \
            else author.default_avatar_url
        if(image):
            quote["image"] = message.attachments[0]["url"]
        return quote

    def store_quotes(self):
        """Persist the in-memory quote store to quotes.json."""
        with open('quotes.json', 'w') as out:
            json.dump(self.quotes, out)
def setup(bot):
    """Cog entry point: instantiate the Quote cog and register it on the bot."""
    cog = Quote(bot)
    bot.add_cog(cog)
|
import os
import re
import subprocess
from Queue import Queue, Empty
from threading import Thread
import uuid
import os
import shutil
import sys
from celery import Celery
from celery.contrib import rdb
from ovirt_imageio_common import directio
from kombu import Queue as kqueue
import json
import random
import logging
import logging.config
log = logging.getLogger("server")
app = Celery('celery_tasks', backend='redis', broker='redis://localhost:6379/0')
'''app.conf.task_queues = (
kqueue('backup_tasks'),
kqueue('restore_tasks'),
)'''
#rdb.set_trace()
def is_blk_device(dev):
    """Return True when *dev* points at a block device, False otherwise.

    Any failure (missing path, permission error) is reported and treated as
    "not a block device", so callers can use this as a best-effort probe.
    """
    # BUGFIX: the `stat` module was never imported at module level, so the
    # original always raised NameError (swallowed below) and returned False.
    import stat
    try:
        if stat.S_ISBLK(os.stat(dev).st_mode):
            return True
        return False
    except Exception:
        # BUGFIX: the message used to be passed as two print() arguments,
        # printing the literal '%s'; format it properly instead
        print('Path %s not found in is_blk_device check' % dev)
        return False
def check_for_odirect_support(src, dest, flag='oflag=direct'):
    # Check whether O_DIRECT is supported by running a zero-block `dd` copy
    # from src to dest with the given flag; a failed execution means no support.
    # NOTE(review): `nova_utils` and `processutils` are not imported anywhere in
    # this module, so as written this raises NameError. Presumably copied from
    # OpenStack (nova.utils / oslo_concurrency.processutils) - confirm and add
    # the intended imports before relying on this function.
    try:
        nova_utils.execute('dd', 'count=0', 'if=%s' % src, 'of=%s' % dest,
                           flag, run_as_root=True)
        return True
    except processutils.ProcessExecutionError:
        return False
def enqueue_output(out, queue):
    """Drain the stream *out* into *queue* in fixed 17-byte chunks.

    Runs on a background thread so the parent can poll qemu-img progress from
    the queue without blocking on the pipe. The stream is closed at EOF.

    BUGFIX: the original read an initial chunk and immediately overwrote it,
    silently dropping the first 17 bytes of output; it also enqueued the final
    empty read. Now every chunk is enqueued exactly once and EOF is not pushed.
    """
    chunk = out.read(17)
    while chunk:
        queue.put(chunk)
        chunk = out.read(17)
    out.close()
@app.task(bind=True, name="ovirt_imageio_daemon.celery_tasks.backup")
def backup(self, ticket_id, path, dest, size, type, buffer_size, recent_snap_id):
    """Celery task: back up a disk image to *dest*.

    For a "full" backup the source is converted to qcow2 with `qemu-img
    convert`, reporting progress through the task state. For incremental
    backups the snapshot chain is inspected (`qemu-img info --backing-chain`)
    and either copied directly with directio (single known parent) or rebased
    in a staging directory before conversion.

    :param ticket_id: imageio ticket identifier (unused in the visible body)
    :param path: source image path
    :param dest: destination backup file path
    :param size: source size in bytes (used for directio progress)
    :param type: "full" for a full backup, anything else for incremental
    :param buffer_size: directio buffer size
    :param recent_snap_id: dict mapping backing-file names to the most recent
     snapshot paths already present in the backup target
    """
    if type == "full":
        cmdspec = [
            'qemu-img',
            'convert',
            '-p',
        ]
        cmdspec += ['-O', 'qcow2', path, dest]
        cmd = " ".join(cmdspec)
        print('Take a full snapshot with qemu-img convert cmd: %s ' % cmd)
        process = subprocess.Popen(cmdspec,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   bufsize=-1,
                                   close_fds=True,
                                   shell=False)
        # read qemu-img progress output on a separate thread so poll() never blocks
        queue = Queue()
        read_thread = Thread(target=enqueue_output,
                             args=(process.stdout, queue))
        read_thread.daemon = True # thread dies with the program
        read_thread.start()
        percentage = 0.0
        while process.poll() is None:
            try:
                try:
                    output = queue.get(timeout=300)
                except Empty:
                    continue
                except Exception as ex:
                    print(ex)
                # qemu-img prints a percentage; surface it through the task state
                percentage = re.search(r'\d+\.\d+', output).group(0)
                print(("copying from %(path)s to "
                       "%(dest)s %(percentage)s %% completed\n") %
                      {'path': path,
                       'dest': dest,
                       'percentage': str(percentage)})
                percentage = float(percentage)
                self.update_state(state='PENDING',
                                  meta={'percentage': percentage})
            except Exception as ex:
                pass
        '''qemu_cmd = ["qemu-img", "info", "--output", "json", dest]
        temp_process = subprocess.Popen(qemu_cmd, stdout=subprocess.PIPE)
        data, err = temp_process.communicate()
        data = json.loads(data)
        size = data["actual-size"]
        process.stdin.close()
        self.update_state(state='PENDING',
                          meta={'actual-size': size})'''
        _returncode = process.returncode # pylint: disable=E1101
        if _returncode:
            print(('Result was %s' % _returncode))
            raise Exception("Execution error %(exit_code)d (%(stderr)s). "
                            "cmd %(cmd)s" %
                            {'exit_code': _returncode,
                             'stderr': process.stderr.read(),
                             'cmd': cmd})
    else:
        # incremental backup: inspect the snapshot chain of the source image
        process = subprocess.Popen('qemu-img info --backing-chain --output json ' + path, stdout=subprocess.PIPE, shell=True)
        stdout, stderr = process.communicate()
        if stderr:
            print(('Result was %s' % stderr))
            raise Exception("Execution error %(exit_code)d (%(stderr)s). "
                            "cmd %(cmd)s" %
                            {'exit_code': 1,
                             'stderr': stderr,
                             'cmd': 'qemu-img info --backing-chain --output json ' + path})
        result = json.loads(stdout)
        first_record = result[0]
        first_record_backing_file = first_record.get('backing-filename', None)
        recent_snap_path = recent_snap_id.get(str(first_record_backing_file), None)
        if first_record_backing_file and recent_snap_path:
            # the immediate parent is already backed up: stream only the top
            # overlay with directio, then point its backing file at the parent
            op = directio.Send(path,
                               None,
                               size,
                               buffersize=buffer_size)
            total = 0
            print('Executing task id {0.id}, args: {0.args!r} kwargs: {0.kwargs!r}'.format(
                self.request))
            gigs = 0
            with open(dest, "w+") as f:
                for data in op:
                    total += len(data)
                    f.write(data)
                    if total/1024/1024/1024 > gigs:
                        gigs = total/1024/1024/1024
                        percentage = (total/size) * 100
                        self.update_state(state='PENDING',
                                          meta={'percentage': percentage})
            process = subprocess.Popen('qemu-img rebase -u -b ' + recent_snap_path + ' ' + dest, stdout=subprocess.PIPE, shell=True)
            stdout, stderr = process.communicate()
            if stderr:
                log.error("Unable to change the backing file", dest, stderr)
        else:
            # deeper chain: copy each overlay to a staging directory, rewrite
            # the backing references there, then convert the result to qcow2
            temp_random_id = generate_random_string(5)
            tempdir = '/var/triliovault-mounts/staging/' + temp_random_id
            os.makedirs(tempdir)
            commands = []
            for record in result:
                filename = os.path.basename(str(record.get('filename', None)))
                recent_snap_path = recent_snap_id.get(str(record.get('backing-filename')), None)
                if record.get('backing-filename', None) and str(record.get('backing-filename', None)) and not recent_snap_path:
                    try:
                        shutil.copy(path, tempdir)
                        backing_file = os.path.basename(str(record.get('backing-filename', None)))
                        command = 'qemu-img rebase -u -b ' + backing_file + ' ' + filename
                        commands.append(command)
                    except IOError as e:
                        print("Unable to copy file. %s" % e)
                    except:
                        print("Unexpected error:", sys.exc_info())
                else:
                    # reached a record whose parent is already backed up (or a base image)
                    try:
                        shutil.copy(path, tempdir)
                        command = 'qemu-img rebase -u ' + filename
                        commands.append(command)
                    except IOError as e:
                        print("Unable to copy file. %s" % e)
                    except:
                        print("Unexpected error:", sys.exc_info())
                    break
                path = str(record.get('full-backing-filename'))
            string_commands = ";".join(str(x) for x in commands)
            process = subprocess.Popen(string_commands, stdin=subprocess.PIPE, stdout=subprocess.PIPE
                                       , cwd=tempdir, shell=True)
            stdout, stderr = process.communicate()
            if stderr:
                raise Exception(stdout)
            cmdspec = [
                'qemu-img',
                'convert',
                '-p',
            ]
            filename = os.path.basename(str(first_record.get('filename', None)))
            path = os.path.join(tempdir, filename)
            cmdspec += ['-O', 'qcow2', path, dest]
            process = subprocess.Popen(cmdspec,
                                       stdin=subprocess.PIPE,
                                       stdout=subprocess.PIPE,
                                       stderr=subprocess.PIPE,
                                       bufsize=-1,
                                       close_fds=True,
                                       shell=False)
            queue = Queue()
            read_thread = Thread(target=enqueue_output,
                                 args=(process.stdout, queue))
            read_thread.daemon = True # thread dies with the program
            read_thread.start()
            percentage = 0.0
            while process.poll() is None:
                try:
                    try:
                        output = queue.get(timeout=300)
                    except Empty:
                        continue
                    except Exception as ex:
                        print(ex)
                    percentage = re.search(r'\d+\.\d+', output).group(0)
                    print(("copying from %(path)s to "
                           "%(dest)s %(percentage)s %% completed\n") %
                          {'path': path,
                           'dest': dest,
                           'percentage': str(percentage)})
                    percentage = float(percentage)
                    self.update_state(state='PENDING',
                                      meta={'percentage': percentage})
                except Exception as ex:
                    pass
            if recent_snap_path:
                process = subprocess.Popen('qemu-img rebase -u -b ' + recent_snap_path + ' ' + dest, stdout=subprocess.PIPE, shell=True)
                stdout, stderr = process.communicate()
                if stderr:
                    log.error("Unable to change the backing file", dest, stderr)
            # clean up the staging directory
            del_command = 'rm -rf ' + tempdir
            delete_process = subprocess.Popen(del_command, shell=True, stdout=subprocess.PIPE)
            delete_process.communicate()
@app.task(bind=True, name="ovirt_imageio_daemon.celery_tasks.restore")
def restore(self, ticket_id, volume_path, backup_image_file_path, size, buffer_size):
    """Celery task: restore a backup image onto a volume via ``qemu-img convert``.

    Progress is parsed from qemu-img's ``-p`` output and published through
    ``self.update_state`` so callers can poll the task's ``percentage`` meta.

    NOTE(review): ``ticket_id``, ``size`` and ``buffer_size`` are accepted but
    never used here — presumably kept for task-signature compatibility with the
    caller; confirm before removing.
    """
    def transfer_qemu_image_to_volume(
            volume_path,
            backup_image_file_path):
        # Base command; '-p' makes qemu-img emit progress lines on stdout.
        cmdspec = [
            'qemu-img',
            'convert',
            '-p',
        ]
        # Bypass the host page cache ('-t none') only when the target is a
        # block device that supports O_DIRECT writes.
        if is_blk_device(volume_path) and \
            check_for_odirect_support(backup_image_file_path,
                                      volume_path, flag='oflag=direct'):
            cmdspec += ['-t', 'none']
        cmdspec += ['-O', 'qcow2', backup_image_file_path, volume_path]
        default_cache = True
        # NOTE(review): default_cache is hard-coded True, so the '-t none'
        # options added above are always stripped again — the O_DIRECT branch
        # is effectively dead code. Confirm whether this is intentional.
        if default_cache is True:
            if '-t' in cmdspec:
                cmdspec.remove('-t')
            if 'none' in cmdspec:
                cmdspec.remove('none')
        cmd = " ".join(cmdspec)
        print('transfer_qemu_image_to_volume cmd %s ' % cmd)
        process = subprocess.Popen(cmdspec,
                                   stdin=subprocess.PIPE,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.PIPE,
                                   bufsize=-1,
                                   close_fds=True,
                                   shell=False)
        # Drain qemu-img's stdout on a helper thread so the poll loop below
        # never blocks on a pipe read.
        queue = Queue()
        read_thread = Thread(target=enqueue_output,
                             args=(process.stdout, queue))
        read_thread.daemon = True  # thread dies with the program
        read_thread.start()
        percentage = 0.0
        while process.poll() is None:
            try:
                try:
                    # Wait up to 5 minutes for the next progress line.
                    output = queue.get(timeout=300)
                except Empty:
                    continue
                except Exception as ex:
                    print(ex)
                # Progress lines contain a float like '42.00'; extract it.
                percentage = re.search(r'\d+\.\d+', output).group(0)
                print(("copying from %(backup_path)s to "
                       "%(volume_path)s %(percentage)s %% completed\n") %
                      {'backup_path': backup_image_file_path,
                       'volume_path': volume_path,
                       'percentage': str(percentage)})
                percentage = float(percentage)
                self.update_state(state='PENDING',
                                  meta={'percentage': percentage})
            except Exception as ex:
                # Best-effort progress reporting: a parse failure must not
                # abort the copy loop.
                pass
        process.stdin.close()
        _returncode = process.returncode  # pylint: disable=E1101
        if _returncode:
            print(('Result was %s' % _returncode))
            raise Exception("Execution error %(exit_code)d (%(stderr)s). "
                            "cmd %(cmd)s" %
                            {'exit_code': _returncode,
                             'stderr': process.stderr.read(),
                             'cmd': cmd})
    transfer_qemu_image_to_volume(volume_path, backup_image_file_path)
def generate_random_string(string_length=5):
    """Return a random uppercase hex string of length ``string_length``.

    Drawn from a UUID4, so at most 32 characters are available.
    """
    token = uuid.uuid4().hex.upper()  # 32 hex chars, dashes already absent
    return token[:string_length]
|
# str1 = input()
str1 = 'abcdefghijklmnopqrstuvwxyz'

# Print the requested slices, one per line, in the order the task asks for.
pieces = (
    str1[2],     # third character
    str1[-2],    # second-to-last character
    str1[:5],    # first five characters
    str1[:-2],   # everything except the last two characters
    str1[::2],   # characters at even indices
    str1[1::2],  # characters at odd indices
    str1[::-1],  # the whole string reversed
    str1[::-2],  # every other character, starting from the last
)
for piece in pieces:
    print(piece)
'''
Делаем срезы 2
На вход программе подается одна строка. Напишите программу, которая выводит:
третий символ этой строки;
предпоследний символ этой строки;
первые пять символов этой строки;
всю строку, кроме последних двух символов;
все символы с четными индексами;
все символы с нечетными индексами;
все символы в обратном порядке;
все символы строки через один в обратном порядке, начиная с последнего.
Формат входных данных
На вход программе подается одна строка, длина которой больше 5 символов.
Формат выходных данных
Программа должна вывести данные в соответствии с условием. Каждое значение выводится на отдельной строке.
Sample Input:
abcdefghijklmnopqrstuvwxyz
Sample Output:
c
y
abcde
abcdefghijklmnopqrstuvwx
acegikmoqsuwy
bdfhjlnprtvxz
zyxwvutsrqponmlkjihgfedcba
zxvtrpnljhfdb
Напишите программ
'''
|
from .firefly_task import Model
from .firefly_task import dynamics
#from .env_utils import pos_init
from .env_utils import *
#from .env_variables import *
"""
# these are for gym
from .gym_input import true_params
from gym.envs.registration import register
register(
id ='FireflyTorch-v0',
#entry_point ='FireflyEnv.firefly_gym:FireflyEnv',
entry_point='firefly_gym:FireflyEnv',
)
""" |
#!/usr/bin/env python
# coding: utf-8
from tests.common import TestCase
from tests.common import BASEDIR
from clocwalk.libs.detector.cvecpe import cpe_compare_version
class CPETestCase(TestCase):
    """Tests for ``cpe_compare_version``: matching a CPE rule's
    (version, update) pair against a discovered component version string."""

    def setUp(self):
        # No per-test fixtures needed.
        pass

    def test_compare(self):
        # 'PRELEASE1' should match the '.pr1' suffix of the configured version;
        # a different pre-release number or a '*' wildcard should not.
        self.assertTrue(cpe_compare_version(rule_version='2.9.0', rule_update='PRELEASE1', conf_version='2.9.0.pr1'))
        self.assertFalse(cpe_compare_version(rule_version='2.9.0', rule_update='PRELEASE2', conf_version='2.9.0.pr1'))
        self.assertFalse(cpe_compare_version(rule_version='2.9.0', rule_update='*', conf_version='2.9.0.pr1'))
|
# Description: Calculate yearly averages from monthly files.
#
# Author: André Palóczy
# E-mail: paloczy@gmail.com
# Date: January/2018
import numpy as np
import matplotlib
from glob import glob
from os import system
from datetime import datetime
from netCDF4 import Dataset, num2date
from pandas import Timestamp
from gsw import SA_from_SP, CT_from_pt
from gsw import alpha as falpha
from gsw import beta as fbeta
import xarray as xr
from ap_tools.utils import lon360to180, rot_vec
from reproducibility import savez
def deg2m_dist(lon, lat):
    """Convert 2-D longitude/latitude grids [deg] into grid spacings [m].

    USAGE
    -----
    dx, dy = deg2m_dist(lon, lat)
    """
    lon = np.array(lon)
    lat = np.array(lat)
    dlat, _ = np.gradient(lat)  # [deg] spacing along the first axis
    _, dlon = np.gradient(lon)  # [deg] spacing along the second axis
    meters_per_degree = 111120.0  # [m/deg]
    # Meridians converge poleward, so zonal distance shrinks with cos(lat).
    dx = dlon * meters_per_degree * np.cos(lat * np.pi / 180.)  # [m]
    dy = dlat * meters_per_degree  # [m]
    return dx, dy
def ang_isob(xiso, yiso):
    """Return segment midpoints and orientation of an isobath.

    Parameters
    ----------
    xiso, yiso : array_like
        Longitudes and latitudes of the isobath vertices [deg].

    Returns
    -------
    xisom, yisom : ndarray
        Midpoints of consecutive vertex pairs [deg].
    theta : ndarray
        Angle each segment (k -> k+1) forms with the zonal axis,
        counterclockwise from east [deg].
    """
    xiso, yiso = map(np.array, (xiso, yiso))
    R = 6371000.0  # Mean radius of the earth in meters (6371 km), from gsw.constants.earth_radius.
    deg2rad = np.pi/180.  # [rad/deg]
    # Vectorized replacement of the per-segment loop: for each segment use
    # point k+1 minus point k, scaling zonal distance by cos(lat_k) exactly
    # as the original element-wise computation did.
    dy = R*np.diff(yiso)
    dx = R*np.diff(xiso)*np.cos(yiso[:-1]*deg2rad)
    theta = np.arctan2(dy, dx)
    xisom = 0.5*(xiso[1:] + xiso[:-1])
    yisom = 0.5*(yiso[1:] + yiso[:-1])
    return xisom, yisom, theta/deg2rad
def near(x, x0, npts=1, return_index=False):
    """Return the ``npts`` values of ``x`` nearest to ``x0`` (sorted
    ascending), or their indices into the ORIGINAL ``x`` when
    ``return_index=True``.

    BUGFIX: the previous version popped elements from the working list and
    recorded ``nanargmin`` indices relative to the shrunken list, so for
    ``npts > 1`` the returned indices did not refer to the input array.
    Original indices are now tracked alongside the values (``npts == 1``
    behavior, the only use in this file, is unchanged).
    """
    values = list(x)
    indices = list(range(len(values)))  # positions in the original input
    xnear = []
    xidxs = []
    for _ in range(npts):
        k = np.nanargmin(np.abs(np.array(values) - x0))
        xnear.append(values.pop(k))
        xidxs.append(indices.pop(k))  # pop in lockstep to keep original index
    if return_index:  # Sort indices according to the proximity of wanted points.
        xidxs = [xidxs[i] for i in np.argsort(xnear).tolist()]
    xnear.sort()
    if npts == 1:
        xnear = xnear[0]
        if return_index:
            xidxs = xidxs[0]
    else:
        xnear = np.array(xnear)
    if return_index:
        return xidxs
    else:
        return xnear
def stripmsk(arr, mask_invalid=True):
    """Return ``arr`` as a plain ndarray with masked entries set to NaN.

    With ``mask_invalid=True`` (default), NaN/inf entries are masked first.
    Note: for a masked-array input, the underlying data buffer is written
    in place.
    """
    masked = np.ma.masked_invalid(arr) if mask_invalid else arr
    if not np.ma.isMA(masked):
        return masked
    data = masked.data
    data[masked.mask] = np.nan
    return data
##---
# Switches selecting which diagnostics to compute on this run.
CALC_MULTIYEARLY_TSDUVKE = False
NYR_avg = 10 # Average T, S, u, v every 10 years.
#
CALC_UxVaISOB = False
CALC_U_zavg = False
zslabavg_top, zslabavg_bot = 0, 150  # [m] depth-slab bounds for CALC_U_zavg.
CALC_SSH = False
CALC_PT = False
#
# Also plot seasonal cycle for these.
#
CALC_KE = False
CALC_GRADRHO = False
CALC_Jb = True
CALC_Jb_shelf_integral_timeseries = False
CALC_Tauxy = False
#
CALC_PT_zavg = False
CALC_AICE = False
z_PT = 1000 # [m].
CALC_CLIM_DUVKE = False
# Start and end years.
START_YEAR = 1959
END_YEAR = 2009
# Output file names, one per diagnostic.
fname_out_aice = 'aice.npz'
fname_out_eke = 'EKE_MKE.npz'
fname_out_drhomag = 'gradRHO.npz'
fname_out_Jb = 'Jb.npz'
fname_out_Jb_shelf_integral_timeseries = 'Jb_int.npz'
fname_out_Tauxy = 'tauxy.npz'
fname_out_ssh = 'yearly_SSH.npz'
fname_out_u = 'yearly_U.npz'
fname_out_uvxisob = 'yearly_UVxisob.npz'
fname_out_PT = 'yearly_PT.npz'
fname_out_tsduvke = 'decadal_TSD-UV-KE.npz'
fname_out_duvke_clim = 'clim_%d-%d_D-UV-KE.npz'%(START_YEAR, END_YEAR)
fname_dzu = 'POP-dzu_dzt_kzit_subsetSO.nc'  # Grid-cell thickness file.
cm2m = 1e-2       # POP output is in CGS; factor to convert cm -> m.
fcap = 501        # Keep only the first 501 rows (Southern Ocean subset).
thresh = 1e10     # Values above this are treated as missing/fill.
fdir_tail = '/ocn/hist/ia_top_tx0.1_v2_yel_patc_1948_intel.pop.h.????-??.nc'
head_fin = '/lustre/atlas1/cli115/proj-shared/ia_top_tx0.1_v2_60yrs/'
# Collect monthly history files for the requested year range, sorted in time.
fdirs = glob(head_fin+'ia_top_tx0.1_v2_yel_patc_1948_intel_def_year_????')
fdirs.sort()
if not isinstance(fdirs, list):
    fdirs = [fdirs]
fnames = []
for fdir in fdirs:
    ystr = int(fdir[-4:])  # Year is the last 4 chars of the directory name.
    if np.logical_or(ystr<START_YEAR, ystr>END_YEAR):
        continue
    fnamesi = glob(fdir + fdir_tail)
    fnamesi.sort()
    for f in fnamesi:
        fnames.append(f)
# Read the horizontal/vertical grid once from the first monthly file.
nc = Dataset(fnames[0])
lont = nc.variables['TLONG'][:fcap,:]  # Tracer-point longitudes.
latt = nc.variables['TLAT'][:fcap,:]   # Tracer-point latitudes.
lonu = nc.variables['ULONG'][:fcap,:]  # Velocity-point longitudes.
latu = nc.variables['ULAT'][:fcap,:]   # Velocity-point latitudes.
kmt = nc.variables['KMT'][:fcap,:] - 1 # Convert fortran to python index.
ny, nx = kmt.shape
z = nc.variables['z_t'][:]*cm2m # [m].
t = []    # Yearly (or section-specific) time labels, filled by each section.
tmo = []  # Monthly time labels, filled by each section.
# Pre-computed 1000 m isobath geometry (positions, distances, grid indices).
fname_isobs = 'isobaths.nc'
ncx = Dataset(fname_isobs)
dmsm = ncx["1000 m isobath"]['diso'][:]
xmsm = ncx["1000 m isobath"]['xiso'][:]
ymsm = ncx["1000 m isobath"]['yiso'][:]
xm = ncx["1000 m isobath (U-points)"]['xiso'][:]
ym = ncx["1000 m isobath (U-points)"]['yiso'][:]
dm = ncx["1000 m isobath (U-points)"]['diso'][:]
Im = ncx["1000 m isobath (U-points)"]['i'][:]  # Row indices of isobath U-points.
Jm = ncx["1000 m isobath (U-points)"]['j'][:]  # Column indices of isobath U-points.
uxmsk = ncx['1000 m isobath (x-isobath U, V masks)']['Umsk'][:]
vxmsk = ncx['1000 m isobath (x-isobath U, V masks)']['Vmsk'][:]
dmm = 0.5*(dm[1:] + dm[:-1])  # Distances at segment midpoints.
xmm, ymm, angm = ang_isob(xm, ym) # Angle of the U-points isobath.
##----
if CALC_AICE:
    # Yearly-file loop computing total sea-ice area above a concentration
    # threshold, one value per monthly file.
    iceconc_thresh = 0.15  # Ice concentration threshold.
    # BUGFIX: the ice-file paths are now built into a LOCAL list instead of
    # overwriting the global 'fnames', which previously left every later
    # CALC_* section reading CICE files instead of the POP ocean files.
    ice_fnames = [fnamen.replace('ocn', 'ice').replace('.pop.h.', '.cice.h.')
                  for fnamen in fnames]
    AICE = np.array([])
    nfirst = True
    nmo = 0
    for fnamen in ice_fnames:
        yeari = fnamen.split('/')[-1].split('.')[-2]  # 'YYYY-MM'
        yeari2 = yeari[:-3]
        print(yeari)
        nci = Dataset(fnamen)
        if nfirst:
            # Static grid fields; read once from the first file.
            tarea = nci['tarea'][:].data*1e-6 # [km2]
            lon = lon360to180(nci['TLON'][:].data)
            lat = nci['TLAT'][:].data
            tmask = nci['tmask'][:]
            nfirst = False
        Aice = nci.variables['aice'][0,:fcap,:]/100. # Convert to fractional sea ice concentration (0-1).
        # Calculate total ice area for valid ice cells.
        # iarea=aice(aice>=dc & aice<=1.0 & aice~=0).*tarea(aice>=dc & aice<=1.0 & aice~=0).*1e-6;
        fice = np.logical_and(Aice>=iceconc_thresh, Aice<=1.0)
        aice = np.sum(Aice[fice]*tarea[fice])
        t.append(yeari)
        AICE = np.append(AICE, aice)
    t = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in t])
    savez(fname_out_aice, icearea=AICE, lon=lont, lat=latt, tarea=tarea, t=t)
##---
if CALC_UxVaISOB:
dzui = Dataset(fname_dzu).variables['dzu'][:]
dzui = dzui[:,Im,Jm]*cm2m
Uxyr, Ux, ux = None, None, None
Vayr, Va, va = None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
# Zonal/meridional vel. components.
uu = nci.variables['UVEL'][0,:,:fcap,:]
vv = nci.variables['VVEL'][0,:,:fcap,:]
ui = uu[:,Im,Jm]*cm2m
vi = vv[:,Im,Jm]*cm2m
if fnamen==fnames[0]:
hmsk = ~ui.mask
hi = np.array([dzui[hmsk[:,n],n].sum(axis=0) for n in range(Im.size)])
Ui = np.sum(ui*dzui, axis=0) # [m2/s], zonal transport per unit along-isobath length.
Vi = np.sum(vi*dzui, axis=0) # [m2/s], meridional transport per unit along-isobath length.
uui = Ui/hi # [m/s], depth-averaged zonal vel.
vvi = Vi/hi # [m/s], depth-averaged meridional vel.
uui = 0.5*(uui[1:] + uui[:-1])
vvi = 0.5*(vvi[1:] + vvi[:-1])
Ui = 0.5*(Ui[1:] + Ui[:-1])
Vi = 0.5*(Vi[1:] + Vi[:-1])
# Rotate depth-averaged velocities using angles based on realistic isobaths.
va, ux = rot_vec(uui, vvi, angle=angm, degrees=True) # ATTENTION: v_along, u_across = rot(u_east, v_north)***
ux = -ux # Positive ONSHORE.
Vva, Uux = rot_vec(Ui, Vi, angle=angm, degrees=True)
Uux = -Uux
ux = ux[np.newaxis,...]
va = va[np.newaxis,...]
Uux = Uux[np.newaxis,...]
Vva = Vva[np.newaxis,...]
if Ux is not None:
Ux = np.vstack((Ux, ux))
Va = np.vstack((Va, va))
UUx = np.vstack((UUx, Uux))
VVa = np.vstack((VVa, Vva))
else:
Ux = ux
Va = va
UUx = Uux
VVa = Vva
nmo+=1
tmo.append(yeari)
if nmo==12:
Ux = Ux.mean(axis=0)[np.newaxis,...]
Va = Va.mean(axis=0)[np.newaxis,...]
UUx = UUx.mean(axis=0)[np.newaxis,...]
VVa = VVa.mean(axis=0)[np.newaxis,...]
if Uxyr is not None:
Uxyr = np.vstack((Uxyr, Ux))
Vayr = np.vstack((Vayr, Va))
UUxyr = np.vstack((UUxyr, UUx))
VVayr = np.vstack((VVayr, VVa))
else:
Uxyr = Ux.copy()
Vayr = Va.copy()
UUxyr = UUx.copy()
VVayr = VVa.copy()
t.append(yeari2)
Ux, UUx = None, None
Va, VVa = None, None
nmo=0
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
Uxyr, Vayr = Uxyr.data, Vayr.data
Uxyr[Uxyr>thresh] = np.nan
Vayr[Vayr>thresh] = np.nan
UUxyr, VVayr = UUxyr.data, VVayr.data
UUxyr[UUxyr>thresh] = np.nan
VVayr[VVayr>thresh] = np.nan
# Uxyr, Vayr = Uxyr*cm2m, Vayr*cm2m # [m/s].
savez(fname_out_uvxisob, ux=Uxyr, va=Vayr, Ux=UUxyr, Va=VVayr, lonu=xm, latu=ym, dm=dmm, xm=xmm, ym=ymm, angm=angm, Im=Im, Jm=Jm, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym)
##----
if CALC_U_zavg:
    # Depth-slab (zslabavg_top..zslabavg_bot) average of zonal velocity:
    # monthly values along the isobath plus yearly-mean 2D fields.
    fzu = np.logical_and(z>=zslabavg_top, z<=zslabavg_bot)
    dzu0 = Dataset(fname_dzu).variables['dzu'][fzu,...]*cm2m # [m].
    h0 = dzu0.sum(axis=0) # [m], slab thickness for the depth average.
    Uyr, U, u = None, None, None
    # BUGFIX: Umo was never initialized, so the 'Umo is not None' check below
    # raised NameError at the end of the first year.
    Umo = None
    nmo = 0
    for fnamen in fnames:
        yeari = fnamen.split('/')[-1].split('.')[-2]  # 'YYYY-MM'
        yeari2 = yeari[:-3]
        print(yeari)
        nci = Dataset(fnamen)
        u = nci.variables['UVEL'][0,fzu,:fcap,:]
        u = np.sum(u*dzu0, axis=0)/h0  # Thickness-weighted depth average.
        u = u[np.newaxis,...]*cm2m # [m/s].
        if U is not None:
            U = np.vstack((U, u))
        else:
            U = u
        nmo += 1
        tmo.append(yeari)
        if nmo==12:
            # Keep the 12 monthly depth-averaged values at the isobath points.
            if Umo is not None:
                Umo = np.vstack((Umo, U[:, Im, Jm]))
            else:
                # BUGFIX: first year previously stored the FULL 2D fields,
                # which made the vstack with the isobath subset above fail in
                # year two (cf. the analogous Tauxmo/Tauymo logic).
                Umo = U[:, Im, Jm].copy()
            U = U.mean(axis=0)[np.newaxis,...]  # Yearly mean field.
            if Uyr is not None:
                Uyr = np.vstack((Uyr, U))
            else:
                Uyr = U.copy()
            t.append(int(yeari2))
            U = None
            nmo = 0
    t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
    tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
    Uyr = Uyr.data
    Uyr[Uyr>thresh] = np.nan  # Fill values -> NaN.
    Uyr[Uyr==0.] = np.nan     # Land cells -> NaN.
    savez(fname_out_u, umonthly=Umo, u=Uyr, lon=lonu, lat=latu, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym, ztop=zslabavg_top, zbot=zslabavg_bot)
##---
if CALC_Tauxy: # Yearly wind stress.
Tauxyr, Tauxmo, Taux, taux = None, None, None, None
Tauyyr, Tauymo, Tauy, tauy = None, None, None, None
skel = np.zeros((ny, nx))
Tauxclm, Tauyclm = dict(), dict()
_ = [Tauxclm.update({mo:skel}) for mo in range(1, 13)]
_ = [Tauyclm.update({mo:skel}) for mo in range(1, 13)]
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
taux = nci.variables['TAUX'][0,:fcap,:]
tauy = nci.variables['TAUY'][0,:fcap,:]
taux, tauy = taux[np.newaxis,...], tauy[np.newaxis,...]
if Taux is not None:
Taux = np.vstack((Taux, taux))
Tauy = np.vstack((Tauy, tauy))
else:
Taux = taux.copy()
Tauy = tauy.copy()
nmo+=1
tmo.append(yeari)
# Update monthly climatological fields.
Tauxclm.update({nmo:Tauxclm[nmo] + taux})
Tauyclm.update({nmo:Tauyclm[nmo] + tauy})
if nmo==12:
if Tauxmo is not None:
Tauxmo = np.vstack((Tauxmo, Taux[:, Im, Jm]))
Tauymo = np.vstack((Tauymo, Tauy[:, Im, Jm]))
else:
Tauxmo, Tauymo = Taux[:, Im,Jm], Tauy[:, Im,Jm]
Taux = Taux.mean(axis=0)[np.newaxis,...]
Tauy = Tauy.mean(axis=0)[np.newaxis,...]
if Tauxyr is not None:
Tauxyr = np.vstack((Tauxyr, Taux))
Tauyyr = np.vstack((Tauyyr, Tauy))
else:
Tauxyr, Tauyyr = Taux.copy(), Tauy.copy()
t.append(int(yeari2))
Taux = None
Tauy = None
nmo=0
Tauxmom = 0.5*(Tauxmo[:, 1:] + Tauxmo[:, :-1])
Tauymom = 0.5*(Tauymo[:, 1:] + Tauymo[:, :-1])
Tauamo, _ = rot_vec(Tauxmom, Tauymom, angle=angm, degrees=True) # positive CLOCKWISE***
dynecm2toNm2 = 1e-1 # 1e-5*1e4
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
#
# Along-isobath wind stress, positive CLOCKWISE around the isobath.
Tauamo = Tauamo.data
Tauamo[Tauamo>thresh] = np.nan
Tauamo = Tauamo*dynecm2toNm2 # [N/m2].
#
#--- Climatological monthly fields.
nt = len(fnames)/12
for mo in range(1, 13):
auxx = Tauxclm[mo].squeeze()*dynecm2toNm2/nt
auxy = Tauyclm[mo].squeeze()*dynecm2toNm2/nt
Tauxclm.update({mo:auxx})
Tauyclm.update({mo:auxy})
#
Tauxyr, Tauyyr = Tauxyr.data, Tauyyr.data
Tauxmo, Tauymo = Tauxmo.data, Tauymo.data
Tauxyr[Tauxyr>thresh] = np.nan
Tauyyr[Tauyyr>thresh] = np.nan
Tauxyr = Tauxyr*dynecm2toNm2 # [N/m2].
Tauyyr = Tauyyr*dynecm2toNm2 # [N/m2].
Tauxmo[Tauxmo>thresh] = np.nan
Tauymo[Tauymo>thresh] = np.nan
Tauxmo = Tauxmo*dynecm2toNm2 # [N/m2].
Tauymo = Tauymo*dynecm2toNm2 # [N/m2].
savez(fname_out_Tauxy, tauxclm=Tauxclm, tauyclm=Tauyclm, tau_alongmo=Tauamo, tauxmo=Tauxmo, tauymo=Tauymo, taux=Tauxyr, tauy=Tauyyr, lon=lonu, lat=latu, dm=dmm, xm=xmm, ym=ymm, angm=angm, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym)
if CALC_Jb_shelf_integral_timeseries: # Monthly surface buoyancy flux integrated over the shelf.
JbINT = np.array([])
JqINT = np.array([])
JsINT = np.array([])
# Load in 1000 m mask.
finvol = np.bool8(np.load('volmsk1000m.npz')['volmsk'])
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
shf = nci.variables['SHF'][0,:fcap,:] # [W/m2].
if fnamen==fnames[0]:
rho0 = nci.variables['rho_sw'][0]*1e3 # [kg/m3].
rho_fw = nci.variables['rho_fw'][0]*1e3 # [kg/m3].
g = nci.variables['grav'][0]*1e-2 # [m/s2].
Cp = nci.variables['cp_sw'][0]*1e3*1e-7 # [J/kg/degC].
rhoCp = rho0*Cp
#
wetmsk = np.float32(~shf.mask) # Ones in valid (non-continent) cells.
tarea = nci.variables['TAREA'][:fcap,:]*wetmsk*cm2m*cm2m # [m2].
tareain = tarea[finvol] # [m2], zeros on the continent.
Tareain = tareain.sum() # [m2].
JB = shf*0
JQ = JB.copy()
JS = JB.copy()
sfwf = nci.variables['SFWF'][0,:fcap,:]/rho_fw # [(kg of freshwater)/m2/s] / [(kg of freshwater)/m3] = [m/s] = [m3/s/m2]. Volume flux density.
# positive SFWF = Ocean gains freshwater, so this is (P - E).
SSSp = nci.variables['SALT'][0,0,:fcap,:] # [g/kg].
SST = nci.variables['TEMP'][0,0,:fcap,:] # [degC].
SSSA = SA_from_SP(SSSp, 0, lont, latt) # [g/kg].
SSCT = CT_from_pt(SSSA, SST) # [degC].
alpha = falpha(SSSA, SSCT, 0)
beta = fbeta(SSSA, SSCT, 0)
coeffQ = g*alpha/rhoCp
coeffFW = g*beta*SSSA
qb = coeffQ*shf
sb = coeffFW*sfwf # Positive SFWF, ocean gains freshwater, hence buoyancy.
jb = qb + sb # Surface buoyancy flux [W/kg]. Hosegood et al. (2013).
# Accumulate time-averaged 2D fields [W/kg].
JB += jb
JQ += qb
JS += sb
# Integrate over the 1000 m-bounded control surface.
Jbint = np.sum(jb[finvol]*tareain)/Tareain
Jqint = np.sum(qb[finvol]*tareain)/Tareain
Jsint = np.sum(sb[finvol]*tareain)/Tareain
JbINT = np.append(JbINT, Jbint)
JqINT = np.append(JqINT, Jqint)
JsINT = np.append(JsINT, Jsint)
nmo+=1
tmo.append(yeari)
if nmo==12:
nmo=0
nt = len(tmo)
JB /= nt
JQ /= nt
JS /= nt
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
savez(fname_out_Jb_shelf_integral_timeseries, Jb=JbINT, Jq=JqINT, Js=JsINT, t=tmo, Jbxy=JB, Jqxy=JQ, Jsxy=JS, lon=lont, lat=latt)
##---
if CALC_Jb: # Yearly surface buoyancy flux.
thresh = 1e3
Jbyr, Jbmo, Jb, jb = None, None, None, None
finvol = np.bool8(np.load('volmsk1000m.npz')['volmsk'])
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
shf = nci.variables['SHF'][0,:fcap,:] # [W/m2].
if fnamen==fnames[0]:
rho0 = nci.variables['rho_sw'][0]*1e3 # [kg/m3].
rho_fw = nci.variables['rho_fw'][0]*1e3 # [kg/m3].
g = nci.variables['grav'][0]*1e-2 # [m/s2].
Cp = nci.variables['cp_sw'][0]*1e3*1e-7 # [J/kg/degC].
rhoCp = rho0*Cp
#
wetmsk = np.float32(~shf.mask) # Ones in valid (non-continent) cells.
tarea = nci.variables['TAREA'][:fcap,:]*wetmsk*cm2m*cm2m # [m2].
tareain = tarea[finvol] # [m2], zeros on the continent.
Tareain = tareain.sum() # [m2].
sfwf = nci.variables['SFWF'][0,:fcap,:]/rho_fw # [(kg of freshwater)/m2/s] / [(kg of freshwater)/m3] = [m/s] = [m3/s/m2]. Volume flux density.
# positive SFWF = Ocean gains freshwater, so this is (P - E).
SSSp = nci.variables['SALT'][0,0,:fcap,:] # [g/kg].
SST = nci.variables['TEMP'][0,0,:fcap,:] # [degC].
SSSA = SA_from_SP(SSSp, 0, lont, latt) # [g/kg].
SSCT = CT_from_pt(SSSA, SST) # [degC].
alpha = falpha(SSSA, SSCT, 0)
beta = fbeta(SSSA, SSCT, 0)
coeffQ = g*alpha/rhoCp
coeffFW = g*beta*SSSA
qb = coeffQ*shf
sb = coeffFW*sfwf # Positive SFWF, ocean gains freshwater, hence buoyancy.
jb = qb + sb # Surface buoyancy flux [W/kg]. Hosegood et al. (2013).
# Integrate over the 1000 m-bounded control surface.
Jbint = np.sum(jb[finvol]*tareain)/Tareain
jb = jb[np.newaxis,...]
if Jb is not None:
Jb = np.vstack((Jb, jb))
else:
Jb = jb.copy()
nmo+=1
tmo.append(yeari)
if nmo==12:
if Jbmo is not None:
Jbmo = np.vstack((Jbmo, Jb[:, Im, Jm]))
else:
Jbmo = Jb[:, Im, Jm]
Jb = Jb.mean(axis=0)[np.newaxis,...]
if Jbyr is not None:
Jbyr = np.vstack((Jbyr, Jb))
else:
Jbyr = Jb.copy()
t.append(int(yeari2))
Jb = None
nmo=0
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
Jbyr = Jbyr.data
# Jbyr[np.abs(Jbyr)>thresh] = np.nan
Jbmo = Jbmo.data
# Jbmo[np.abs(Jbmo)>thresh] = np.nan
savez(fname_out_Jb, Jb=Jbyr, Jbmonthly=Jbmo, lon=lont, lat=latt, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym)
##---
if CALC_GRADRHO:
ncdzu = Dataset(fname_dzu)
dzt = ncdzu.variables['dzt'][:]*cm2m # [m].
dx, dy = deg2m_dist(lont, latt) # [m].
dx, dy = dx*1e-3, dy*1e-3 # [km].
DRHOMAGyr, DRHOMAGmo, DRHOMAG, drhomag = None, None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
rho = nci.variables['RHO'][0,:,:fcap,:]*1e3 # [kg/m3].
_, drhody, drhodx = np.gradient(rho)
drhody, drhodx = drhody/dy, drhodx/dx # [kg/m3/km].
drhomag = np.sqrt(drhodx*drhodx + drhody*drhody) # [kg/m3/km].
if fnamen==fnames[0]:
solidmsk = np.float32(~rho.mask)
tarea = nci.variables['TAREA'][:fcap,:]*cm2m*cm2m # [m2].
dVt = tarea[np.newaxis,...]*dzt
Vh = np.sum(dVt*solidmsk, axis=0) # [m3].
# plt.figure(); plt.imshow(np.log10(np.flipud(drhomag[0,...])), vmin=-7.5, vmax=-5.5); plt.colorbar(orientation='horizontal')
drhomag = np.sum(drhomag*dVt, axis=0)/Vh
drhomag = drhomag[np.newaxis,...]
if DRHOMAG is not None:
DRHOMAG = np.vstack((DRHOMAG, drhomag))
else:
DRHOMAG = drhomag.copy()
nmo+=1
tmo.append(yeari)
if nmo==12:
if DRHOMAGmo is not None:
DRHOMAGmo = np.vstack((DRHOMAGmo, DRHOMAG[:, Im, Jm]))
else:
DRHOMAGmo = DRHOMAG[:, Im, Jm]
DRHOMAG = DRHOMAG.mean(axis=0)[np.newaxis,...]
if DRHOMAGyr is not None:
DRHOMAGyr = np.vstack((DRHOMAGyr, DRHOMAG))
else:
DRHOMAGyr = DRHOMAG.copy()
t.append(int(yeari2))
DRHOMAG = None
nmo=0
nmotot = 12*len(t) # Total number of each month.
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
DRHOMAGyr = DRHOMAGyr.data
DRHOMAGyr[DRHOMAGyr>thresh] = np.nan
DRHOMAGmo = DRHOMAGmo.data
DRHOMAGmo[DRHOMAGmo>thresh] = np.nan
savez(fname_out_drhomag, gradrho_mag_monthly=DRHOMAGmo, gradrho_mag=DRHOMAGyr, lon=lont, lat=latt, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym)
##---
if CALC_KE:
ncdzu = Dataset(fname_dzu)
dzu = ncdzu.variables['dzu'][:]*cm2m # [m].
EKEyr, EKEmo, EKE, eke = None, None, None, None
KEyr, KEmo, KE, ke = None, None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
mo = int(yeari[-2:])
_ = system('echo "%s" > t_processing.txt'%yeari)
nci = Dataset(fnamen)
ke = nci.variables['KE'][0,:,:fcap,:]*cm2m*cm2m # [m2/s2].****
if fnamen==fnames[0]:
solidmsk = np.float32(~ke.mask)
uarea = nci.variables['UAREA'][:fcap,:]*cm2m*cm2m # [m2].
dVu = uarea[np.newaxis,...]*dzu
Vh = np.sum(dVu*solidmsk, axis=0) # [m3].
uu = nci.variables['UVEL'][0,:,:fcap,:]*cm2m # [m/s]
vv = nci.variables['VVEL'][0,:,:fcap,:]*cm2m # [m/s]
mke = 0.5*(uu*uu + vv*vv) # [m2/s2]
eke = ke - mke # Monthly horizontal EKE [cm2/s2]
# Depth-average.
# plt.figure(); plt.imshow(np.log10(np.flipud(eke*cm2m**2)), vmin=-5, vmax=-1); plt.colorbar(orientation='horizontal')
mke = np.sum(mke*dVu, axis=0)/Vh # [m2/s2].
eke = np.sum(eke*dVu, axis=0)/Vh # [m2/s2].
mke = mke[np.newaxis,...]
eke = eke[np.newaxis,...]
if EKE is not None:
MKE = np.vstack((MKE, mke))
EKE = np.vstack((EKE, eke))
else:
MKE = mke.copy()
EKE = eke.copy()
nmo+=1
tmo.append(yeari)
if nmo==12:
if EKEmo is not None:
MKEmo = np.vstack((MKEmo, MKE[:, Im, Jm]))
EKEmo = np.vstack((EKEmo, EKE[:, Im, Jm]))
else:
EKEmo = EKE[:, Im, Jm]
MKEmo = MKE[:, Im, Jm]
MKE = MKE.mean(axis=0)[np.newaxis,...]
EKE = EKE.mean(axis=0)[np.newaxis,...]
if EKEyr is not None:
MKEyr = np.vstack((MKEyr, MKE))
EKEyr = np.vstack((EKEyr, EKE))
else:
MKEyr = MKE.copy()
EKEyr = EKE.copy()
t.append(int(yeari2))
MKE = None
EKE = None
nmo=0
t = np.array([Timestamp(str(ti)+'-06-15').to_pydatetime() for ti in t])
tmo = np.array([Timestamp(str(ti)+'-15').to_pydatetime() for ti in tmo])
MKEyr = MKEyr.data
MKEyr[MKEyr>thresh] = np.nan
EKEyr = EKEyr.data
EKEyr[EKEyr>thresh] = np.nan
MKEmo = MKEmo.data
MKEmo[MKEmo>thresh] = np.nan
EKEmo = EKEmo.data
EKEmo[EKEmo>thresh] = np.nan
savez(fname_out_eke, mke=MKEyr, eke=EKEyr, mkemonthly=MKEmo, ekemonthly=EKEmo, lon=lonu, lat=latu, t=t, tmo=tmo, z=z, d=dm, x=xm, y=ym)
##---
if CALC_SSH:
SSHyr, SSH, ssh = None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
ssh = nci.variables['SSH'][:,:fcap,:]
ssh = np.ma.masked_greater(ssh, thresh)
if SSH is not None:
SSH = np.vstack((SSH, ssh))
else:
SSH = ssh
nmo+=1
if nmo==12:
SSH = SSH.mean(axis=0)[np.newaxis,...]
if SSHyr is not None:
SSHyr = np.vstack((SSHyr, SSH))
else:
SSHyr = SSH.copy()
t.append(int(yeari2))
SSH = None
nmo=0
t = np.array(t)
SSHyr = SSHyr.data
SSHyr[SSHyr>thresh] = np.nan
SSHyr = SSHyr*cm2m # [m].
savez(fname_out_ssh, ssh=SSHyr, lon=lont, lat=latt, t=t, z=z)
##---
if CALC_PT_zavg: # FIXME: Need to finish this option.
fzt = np.logical_and(z>=zslabavg_top+pt, z<=zslabavg_bot_pt)
dzt0 = Dataset(fname_dzu).variables['dzt'][fzt,...]*cm2m # [m].
h0 = dzt0.sum(axis=0) # [m].
jmax, imax = kmt.shape
landmsk=kmt==-1
idxlev = np.ones((jmax, imax))*idxlev0
slabmsk[:,landmsk] = False
for j in range(jmax):
for i in range(imax):
kmtji = kmt[j, i]
if idxlev0>kmtji:
idxlev[kmtji:, j, i] = False
idxlev = np.int32(idxlev)
PTyr, PT, pt = None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
pt = nci.variables['TEMP'][0,fzu,:fcap,:]
pti = np.ones((jmax, imax))*np.nan
for j in range(jmax):
for i in range(imax):
pti[j,i] = pt[idxlev[j,i],j,i]
pti[landmsk] = np.nan
pti = np.ma.masked_greater(pti, thresh)
pt = np.ma.masked_invalid(pti)
pt = np.sum(pt*dzt0, axis=0)/h0
pt = pt[np.newaxis,...]
if PT is not None:
PT = np.vstack((PT, pt))
else:
PT = pt
nmo+=1
if nmo==12:
PT = PT.mean(axis=0)[np.newaxis,...]
if PTyr is not None:
PTyr = np.vstack((PTyr, PT))
else:
PTyr = PT.copy()
t.append(int(yeari2))
PT = None
nmo=0
t = np.array(t)
PTyr = PTyr.data
PTyr[PTyr>thresh] = np.nan
savez(fname_out_PT, pt=PTyr, lon=lont, lat=latt, t=t, z=z, zlev=z_PT)
##---
if CALC_PT:
jmax, imax = kmt.shape
idxlev0 = near(z, z_PT, return_index=True)
idxlev = np.ones((jmax, imax))*idxlev0
# Want temperature at desired level or bottom, whichever is shallower
for j in range(jmax):
for i in range(imax):
kmtji = kmt[j, i]
if idxlev0>kmtji:
idxlev[j, i] = kmtji
idxlev = np.int32(idxlev)
landmsk=idxlev==-1
PTyr, PT, pt = None, None, None
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
pt = nci.variables['TEMP'][0,:,:fcap,:]
pti = np.ones((jmax, imax))*np.nan
for j in range(jmax):
for i in range(imax):
pti[j,i] = pt[idxlev[j,i],j,i]
pti[landmsk] = np.nan
pt = np.ma.masked_greater(pti, thresh)
pt = pt[np.newaxis,...]
if PT is not None:
PT = np.vstack((PT, pt))
else:
PT = pt
nmo+=1
if nmo==12:
PT = PT.mean(axis=0)[np.newaxis,...]
if PTyr is not None:
PTyr = np.vstack((PTyr, PT))
else:
PTyr = PT.copy()
t.append(int(yeari2))
PT = None
nmo=0
t = np.array(t)
PTyr = PTyr.data
PTyr[PTyr>thresh] = np.nan
savez(fname_out_PT, pt=PTyr, lon=lont, lat=latt, t=t, z=z, zlev=z_PT)
##--
if CALC_CLIM_DUVKE:
skel = np.zeros_like(nc.variables['TEMP'][0,:,:fcap,:])
Ujfm, Uamj, Ujas, Uond = skel.copy(), skel.copy(), skel.copy(), skel.copy()
Vjfm, Vamj, Vjas, Vond = skel.copy(), skel.copy(), skel.copy(), skel.copy()
KEjfm, KEamj, KEjas, KEond = skel.copy(), skel.copy(), skel.copy(), skel.copy()
PDjfm, PDamj, PDjas, PDond = skel.copy(), skel.copy(), skel.copy(), skel.copy()
njfm, namj, njas, nond = 0, 0, 0, 0
jfm = [1, 2, 3]
amj = [4, 5, 6]
jas = [7, 8, 9]
ond = [10, 11, 12]
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2, mo = yeari[:-3], int(yeari[-2:])
print(yeari)
nci = Dataset(fnamen)
u = nci.variables['UVEL'][0,:,:fcap,:]
v = nci.variables['VVEL'][0,:,:fcap,:]
ke = nci.variables['KE'][0,:,:fcap,:]
pd = nci.variables['PD'][0,:,:fcap,:]
if mo in jfm:
Ujfm += u
Vjfm += v
KEjfm += ke
PDjfm += pd
njfm+=1
elif mo in amj:
Uamj += u
Vamj += v
KEamj += ke
PDamj += pd
namj+=1
elif mo in jas:
Ujas += u
Vjas += v
KEjas += ke
PDjas += pd
njas+=1
elif mo in ond:
Uond += u
Vond += v
KEond += ke
PDond += pd
nond+=1
Ujfm, Vjfm, KEjfm, PDjfm = Ujfm/njfm, Vjfm/njfm, KEjfm/njfm, PDjfm/njfm
Uamj, Vamj, KEamj, PDamj = Uamj/namj, Vamj/namj, KEamj/namj, PDamj/namj
Ujas, Vjas, KEjas, PDjas = Ujas/njas, Vjas/njas, KEjas/njas, PDjas/njas
Uond, Vond, KEond, PDond = Uond/nond, Vond/nond, KEond/nond, PDond/nond
vnames = ['U', 'V', 'KE', 'PD']
djfm, damj, djas, dond = dict(), dict(), dict(), dict()
_ = [djfm.update({vname:vars()[vname+'jfm']}) for vname in vnames]
_ = [damj.update({vname:vars()[vname+'amj']}) for vname in vnames]
_ = [djas.update({vname:vars()[vname+'jas']}) for vname in vnames]
_ = [dond.update({vname:vars()[vname+'ond']}) for vname in vnames]
savez(fname_out_duvke_clim, jfm=djfm, amj=damj, jas=djas, ond=dond, lon=lont, lat=latt, t=t, z=z, start_year=START_YEAR, end_year=END_YEAR)
if CALC_MULTIYEARLY_TSDUVKE:
nmyr = 12.
NMO_avg = nmyr*NYR_avg
skel = np.zeros_like(nc.variables['TEMP'][0,:,:fcap,:])
Uyr, U = [], skel.copy()
Vyr, V = [], skel.copy()
KEyr, KE = [], skel.copy()
TEMPyr, TEMP = [], skel.copy()
SALTyr, SALT = [], skel.copy()
PDyr, PD = [], skel.copy()
nmo=0
for fnamen in fnames:
yeari = fnamen.split('/')[-1].split('.')[-2]
yeari2 = yeari[:-3]
print(yeari)
nci = Dataset(fnamen)
u = nci.variables['UVEL'][0,:,:fcap,:]
v = nci.variables['VVEL'][0,:,:fcap,:]
ke = nci.variables['KE'][0,:,:fcap,:]
temp = nci.variables['TEMP'][0,:,:fcap,:]
salt = nci.variables['SALT'][0,:,:fcap,:]
pd = nci.variables['PD'][0,:,:fcap,:]
U += u
V += v
KE += ke
TEMP += temp
SALT += salt
PD += pd
nmo+=1
if nmo==1:
yearl = yeari2
elif nmo==NMO_avg:
U, V, KE = map(stripmsk, (U, V, KE))
TEMP, SALT, PD = map(stripmsk, (TEMP, SALT, PD))
Uyr.append(U/nmo)
Vyr.append(V/nmo)
KEyr.append(KE/nmo)
TEMPyr.append(TEMP/nmo)
SALTyr.append(SALT/nmo)
PDyr.append(PD/nmo)
#
U = U*0
V = V*0
KE = KE*0
TEMP = TEMP*0
SALT = SALT*0
PD = PD*0
yearr = yeari2
t.append(yearl+'-'+yearr)
nmo=0
cm2msq = cm2m**2
cmcub2mcub = 1e3
fbad = np.logical_or(Uyr[0]>thresh, ~np.isfinite(Uyr[0]))
for n in range(len(t)):
Uyr[n][fbad] = np.nan
Vyr[n][fbad] = np.nan
KEyr[n][fbad] = np.nan
TEMPyr[n][fbad] = np.nan
SALTyr[n][fbad] = np.nan
PDyr[n][fbad] = np.nan
Uyr = [uyr*cm2m for uyr in Uyr]
Vyr = [vyr*cm2m for vyr in Vyr]
KEyr = [keyr*cm2msq for keyr in KEyr]
PDyr = [rhoyr*cmcub2mcub for rhoyr in PDyr]
savez(fname_out_tsduvke, u=Uyr, v=Vyr, ke=KEyr, temp=TEMPyr, salt=SALTyr, pdens=PDyr, lon=lont, lat=latt, t=t)
|
from odoo import models, fields, api
# 借出人表,一对多,一个人可借出多个资产
class User(models.Model):
    # Odoo model for an asset lender. Field label strings are user-facing
    # and are kept in the original language.
    _name = 'assets.manager'
    _description = '资产借出人'  # "asset lender"
    name = fields.Char('资产借出人', required=True)  # lender name
    # NOTE(review): field is named card_number but its label means
    # "contact phone" — confirm the intended meaning with the callers.
    card_number = fields.Char('联系电话')
    email = fields.Char('电子邮箱')  # e-mail address
    desc_detail = fields.Text('备注')  # free-form notes about the lender
    # user_ids = fields.One2many('assets.main', 'user_id', string='使用人')
    use_ids = fields.One2many('assets.use', 'lender', string='借出记录')  # lending records
|
# Program: ttt_check_position.py
# Tic-tac-toe helper: read a board position (an integer) from the user and
# check whether that cell is free. If it is already taken (or the position
# is invalid), print an error message and exit the program.
# Cell variables map to board positions as follows:
# 1 | 2 | 3
# ---+---+---
# 4 | 5 | 6
# ---+---+---
# 7 | 8 | 9
cell_1 = ' '
cell_2 = ' '
cell_3 = 'o'
cell_4 = ' '
cell_5 = ' '
cell_6 = ' '
cell_7 = 'x'
cell_8 = 'o'
cell_9 = 'x'
# Current piece being played.
chess = 'x'
# Read the chosen position from standard input.
position = int(input())
# BUG FIX: the original only tested positions 1-3, so the empty cells
# 4, 5 and 6 were wrongly reported as occupied. Check all nine cells.
cells = {
    1: cell_1, 2: cell_2, 3: cell_3,
    4: cell_4, 5: cell_5, 6: cell_6,
    7: cell_7, 8: cell_8, 9: cell_9,
}
# A move is allowed only when the position maps to an existing, blank cell.
# Positions outside 1-9 fall back to a non-blank default and are rejected.
is_occupied = cells.get(position, chess) != ' '
if is_occupied:
    print('錯誤,此位置已經有棋子了,結束程式')
    exit()
from flask_classful import FlaskView, route
from applicatioin import db
from flask import render_template, request
from flask import redirect, url_for
from applicatioin.forms.forms import InputForm
from fuzzywuzzy import fuzz
import re
from applicatioin.Builders.ModelBuilder import *
class InputView(FlaskView):
    """View for the essay-input page: plagiarism comparison and essay reports."""

    def check_plagiarism(self, a, b):
        """Return the fuzzy token-sort similarity (0-100) of two texts.

        Both texts are reduced to letters only (digits, punctuation and
        whitespace stripped) before comparison.
        """
        a = re.sub("[^a-zA-Z]", "", a)
        b = re.sub("[^a-zA-Z]", "", b)
        print(fuzz.token_sort_ratio(a, b))
        return fuzz.token_sort_ratio(a, b)

    # BUG FIX: the four helpers below were defined without `self` and called
    # as bare names inside `input_text`, raising NameError at request time.
    # They are now proper instance methods invoked via `self.`.
    def count_pos(self, essay):
        """Return counts of nouns/adjectives/verbs/adverbs via NLTK POS tags."""
        tokenized_sentences = essay_to_sentences(essay, remove_stopwords=True)
        response = {'noun': 0, 'adj': 0, 'verbs': 0, 'adverbs': 0}
        for sentence in tokenized_sentences:
            for _token, pos_tag in nltk.pos_tag(sentence):
                if pos_tag.startswith('N'):
                    response['noun'] += 1
                elif pos_tag.startswith('J'):
                    response['adj'] += 1
                elif pos_tag.startswith('V'):
                    response['verbs'] += 1
                elif pos_tag.startswith('R'):
                    response['adverbs'] += 1
        return response

    def spell_check(self, essay, suggest):
        """Return {misspelled word: suggestions} for the essay (US English).

        When `suggest` is false an empty dict is returned (words are only
        checked, matching the original behaviour).
        """
        import enchant  # local import: pyenchant needs a system library
        d = enchant.Dict("en_US")
        response = {}
        for raw in essay.split(" "):
            word = re.sub("[^a-zA-Z]", "", raw)
            if word and not d.check(word):
                if suggest:
                    response[str(word)] = d.suggest(word)
        return response

    def word_count(self, essay):
        """Return the number of words in the essay (stopwords included)."""
        words = essay_to_wordlist(essay, remove_stopwords=False)
        return len(words)

    def most_frequent_words(self, essay):
        """Return the 10 most common non-stopword tokens of the essay."""
        words = essay_to_wordlist(essay, remove_stopwords=True)
        allWordDist = nltk.FreqDist(w for w in words)
        # Hoisted `most_common(10)` out of the loop (was recomputed 10 times).
        return [word for word, _count in allWordDist.most_common(10)]

    @route('/', methods=['POST', 'GET'])
    def input_text(self):
        """Handle the input form: generate an essay report or compare two texts."""
        form = InputForm()
        if request.method == 'POST':
            if form.validate_on_submit():
                if form.generate_report.data:
                    essay = form.input_field_one.data
                    mfw_list = self.most_frequent_words(essay)
                    wordCount = self.word_count(essay)
                    spellCheck = self.spell_check(essay, suggest=True)
                    part_of_speech = self.count_pos(essay)
                    # BUG FIX: previously passed the *function* `spell_check`
                    # to the template instead of the computed `spellCheck`.
                    return render_template('index.html', form=form, plg=None,
                                           most_frequent_words=str(mfw_list),
                                           word_count=wordCount,
                                           spell_check=str(spellCheck),
                                           part_of_speech=str(part_of_speech))
                else:
                    text_one = form.input_field_one.data
                    text_two = form.input_field_two.data
                    print(text_one)
                    return render_template("index.html", form=form,
                                           plg=str(self.check_plagiarism(text_one, text_two)))
        return render_template('index.html', form=form, plg=None)
|
def FirstNotRepeatingChar(s):
    """Return the index of the first character of `s` that occurs exactly
    once, or -1 if there is no such character (including the empty string).
    """
    # Renamed the counting dict from `map` to `counts`: the original
    # shadowed the `map` builtin.
    counts = {}
    for ch in s:
        counts[ch] = counts.get(ch, 0) + 1
    # Second pass in original order finds the first unique character.
    for i, ch in enumerate(s):
        if counts[ch] == 1:
            return i
    return -1
print(FirstNotRepeatingChar('abac'))  # 'b' is the first unique char -> prints 1
# Solution 2 (alternative implementation; original comment was mojibake for "解法二")
# -*- coding:utf-8 -*-
class Solution:
    def FirstNotRepeatingChar(self, s):
        """Return the index of the first character occurring exactly once in
        `s`, or -1 if none exists.

        Inputs outside the judge's constraints (empty, or longer than 10000
        characters) return -1, as before.
        """
        if len(s) <= 0 or len(s) > 10000:
            return -1
        for i, v in enumerate(s):
            # str.count is O(n) per character -> O(n^2) overall; acceptable
            # for the constrained input size.
            if s.count(v) == 1:
                return i
        # BUG FIX: previously fell off the end and returned None when every
        # character repeats; callers expect -1.
        return -1
import numpy as np
from typing import Dict
from sklearn.mixture import GaussianMixture
from alibi_detect.od.sklearn.base import SklearnOutlierDetector


class GMMSklearn(SklearnOutlierDetector):
    """Gaussian Mixture Model (GMM) outlier-detector backend built on sklearn."""

    def __init__(
        self,
        n_components: int,
    ):
        """sklearn backend for the Gaussian Mixture Model (GMM) outlier detector.

        Parameters
        ----------
        n_components
            Number of components in gaussian mixture model.

        Raises
        ------
        ValueError
            If `n_components` is less than 1.
        """
        super().__init__()
        if n_components < 1:
            raise ValueError('n_components must be at least 1')
        self.n_components = n_components

    def fit(  # type: ignore[override]
        self,
        x_ref: np.ndarray,
        tol: float = 1e-3,
        max_iter: int = 100,
        n_init: int = 1,
        init_params: str = 'kmeans',
        verbose: int = 0,
    ) -> Dict:
        """Fit a `sklearn.mixture.GaussianMixture` to the reference data.

        Parameters
        ----------
        x_ref
            Reference data.
        tol
            Convergence threshold. EM iterations will stop when the lower bound average gain is below this threshold.
        max_iter
            Maximum number of EM iterations to perform.
        n_init
            Number of initializations to perform.
        init_params
            Method used to initialize the weights, the means and the precisions. Must be one of:
                'kmeans' : responsibilities are initialized using kmeans.
                'kmeans++' : responsibilities are initialized using kmeans++.
                'random' : responsibilities are initialized randomly.
                'random_from_data' : responsibilities are initialized randomly from the data.
        verbose
            Enable verbose output. If 1 then it prints the current initialization and each iteration step. If greater
            than 1 then it prints also the log probability and the time needed for each step.

        Returns
        -------
        Dictionary with fit results. The dictionary contains the following keys:
            - converged: bool indicating whether EM algorithm converged.
            - n_iter: number of EM iterations performed.
            - lower_bound: log-likelihood lower bound.
        """
        model = GaussianMixture(
            n_components=self.n_components,
            tol=tol,
            max_iter=max_iter,
            n_init=n_init,
            init_params=init_params,
            verbose=verbose,
        )
        self.gmm = model.fit(x_ref)
        self._set_fitted()
        return {
            'converged': self.gmm.converged_,
            'n_iter': self.gmm.n_iter_,
            'lower_bound': self.gmm.lower_bound_,
        }

    def format_fit_kwargs(self, fit_kwargs: Dict) -> Dict:
        """Format kwargs for `fit` method.

        Parameters
        ----------
        fit_kwargs
            Dictionary of kwargs to format. See `fit` method for details.

        Returns
        -------
        Formatted kwargs.
        """
        # `max_epochs` is the generic knob exposed upstream; it maps onto
        # sklearn's `max_iter`, defaulting to 100 when unset.
        max_epochs = fit_kwargs.get('max_epochs', None)
        return {
            'tol': fit_kwargs.get('tol', 1e-3),
            'max_iter': 100 if max_epochs is None else max_epochs,
            'n_init': fit_kwargs.get('n_init', 1),
            'init_params': fit_kwargs.get('init_params', 'kmeans'),
            'verbose': fit_kwargs.get('verbose', 0),
        }

    def score(self, x: np.ndarray) -> np.ndarray:
        """Computes the score of `x` (negative log-likelihood per sample).

        Parameters
        ----------
        x
            `np.ndarray` with leading batch dimension.

        Returns
        -------
        `np.ndarray` of scores with leading batch dimension.

        Raises
        ------
        NotFittedError
            Raised if method called and detector has not been fit.
        """
        self.check_fitted()
        return -self.gmm.score_samples(x)
|
##########################################################################
#
# Copyright (c) 2022, Cinesite VFX Ltd. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import unittest
import imath
import IECore
import Gaffer
import GafferTest
import GafferUITest
import GafferScene
import GafferSceneTest
import GafferSceneUI
class AttributeInspectorTest( GafferUITest.TestCase ) :
	def testName( self ) :
		"""The inspector reports the attribute name it was constructed with."""
		light = GafferSceneTest.TestLight()
		inspector = GafferSceneUI.Private.AttributeInspector( light["out"], None, "gl:visualiser:scale" )
		self.assertEqual( inspector.name(), "gl:visualiser:scale" )
	@staticmethod
	def __inspect( scene, path, attribute, editScope=None ) :
		"""Run an AttributeInspector for `attribute` at `path` in `scene`.

		A throwaway plug is connected to `editScope["enabled"]` (or left
		unconnected when `editScope` is None) to direct the inspector at the
		target EditScope. Returns the inspection result.
		"""
		editScopePlug = Gaffer.Plug()
		editScopePlug.setInput( editScope["enabled"] if editScope is not None else None )
		inspector = GafferSceneUI.Private.AttributeInspector( scene, editScopePlug, attribute )
		with Gaffer.Context() as context :
			context["scene:path"] = IECore.InternedStringVectorData( path.split( "/" )[1:] )
			return inspector.inspect()
	def __assertExpectedResult(
		self,
		result,
		source,
		sourceType,
		editable,
		nonEditableReason = "",
		edit = None,
		editWarning = ""
	) :
		"""Assert the full contract of an inspection `result`.

		Checks source, source type and editability; for editable results,
		also that an edit can be acquired (inside the edit scope, when one
		is set) and matches `edit` / `editWarning`. For non-editable
		results, checks `nonEditableReason` and that `acquireEdit` raises.
		"""
		self.assertEqual( result.source(), source )
		self.assertEqual( result.sourceType(), sourceType )
		self.assertEqual( result.editable(), editable )
		if editable :
			self.assertEqual( nonEditableReason, "" )
			self.assertEqual( result.nonEditableReason(), "" )
			acquiredEdit = result.acquireEdit()
			self.assertIsNotNone( acquiredEdit )
			if result.editScope() :
				self.assertTrue( result.editScope().isAncestorOf( acquiredEdit ) )
			if edit is not None :
				self.assertEqual(
					acquiredEdit.fullName() if acquiredEdit is not None else "",
					edit.fullName() if edit is not None else ""
				)
			self.assertEqual( result.editWarning(), editWarning )
		else :
			self.assertIsNone( edit )
			self.assertEqual( editWarning, "" )
			self.assertEqual( result.editWarning(), "" )
			self.assertNotEqual( nonEditableReason, "" )
			self.assertEqual( result.nonEditableReason(), nonEditableReason )
			self.assertRaises( RuntimeError, result.acquireEdit )
	def testValue( self ) :
		"""The inspected value reflects the attribute set on the light."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		light["visualiserAttributes"]["scale"]["value"].setValue( 2.0 )
		self.assertEqual(
			self.__inspect( light["out"], "/light", "gl:visualiser:scale" ).value(),
			IECore.FloatData( 2.0 )
		)
	def testSourceAndEdits( self ) :
		"""Exercises source detection and edit acquisition across a chain of
		two EditScopes, downstream tweaks, and read-only states."""
		s = Gaffer.ScriptNode()
		s["light"] = GafferSceneTest.TestLight()
		s["light"]["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		s["group"] = GafferScene.Group()
		s["editScope1"] = Gaffer.EditScope()
		s["editScope2"] = Gaffer.EditScope()
		s["group"]["in"][0].setInput( s["light"]["out"] )
		s["editScope1"].setup( s["group"]["out"] )
		s["editScope1"]["in"].setInput( s["group"]["out"] )
		s["editScope2"].setup( s["editScope1"]["out"] )
		s["editScope2"]["in"].setInput( s["editScope1"]["out"] )
		# Should be able to edit light directly.
		SourceType = GafferSceneUI.Private.Inspector.Result.SourceType
		self.__assertExpectedResult(
			self.__inspect( s["group"]["out"], "/group/light", "gl:visualiser:scale", None ),
			source = s["light"]["visualiserAttributes"]["scale"],
			sourceType = SourceType.Other,
			editable = True,
			edit = s["light"]["visualiserAttributes"]["scale"]
		)
		# Even if there is an edit scope in the way
		self.__assertExpectedResult(
			self.__inspect( s["editScope1"]["out"], "/group/light", "gl:visualiser:scale", None),
			source = s["light"]["visualiserAttributes"]["scale"],
			sourceType = SourceType.Other,
			editable = True,
			edit = s["light"]["visualiserAttributes"]["scale"]
		)
		# We shouldn't be able to edit it if we've been told to use an EditScope and it isn't in the history
		self.__assertExpectedResult(
			self.__inspect( s["group"]["out"], "/group/light", "gl:visualiser:scale", s["editScope1"] ),
			source = s["light"]["visualiserAttributes"]["scale"],
			sourceType = SourceType.Other,
			editable=False,
			nonEditableReason = "The target EditScope (editScope1) is not in the scene history."
		)
		# If it is in the history though, and we're told to use it, then we will.
		inspection = self.__inspect( s["editScope2"]["out"], "/group/light", "gl:visualiser:scale", s["editScope2"] )
		self.assertIsNone(
			GafferScene.EditScopeAlgo.acquireAttributeEdit(
				s["editScope2"], "/group/light", "gl:visualiser:scale", createIfNecessary = False
			)
		)
		self.__assertExpectedResult(
			inspection,
			source=s["light"]["visualiserAttributes"]["scale"],
			sourceType=SourceType.Upstream,
			editable = True
		)
		lightEditScope2Edit = inspection.acquireEdit()
		self.assertIsNotNone( lightEditScope2Edit )
		self.assertEqual(
			lightEditScope2Edit,
			GafferScene.EditScopeAlgo.acquireAttributeEdit(
				s["editScope2"], "/group/light", "gl:visualiser:scale", createIfNecessary = False
			)
		)
		# If there's an edit downstream of the EditScope we're asked to use,
		# then we're allowed to be editable still
		inspection = self.__inspect( s["editScope2"]["out"], "/group/light", "gl:visualiser:scale", s["editScope1"] )
		self.assertTrue( inspection.editable() )
		self.assertEqual( inspection.nonEditableReason(), "" )
		lightEditScope1Edit = inspection.acquireEdit()
		self.assertIsNotNone( lightEditScope1Edit )
		self.assertEqual(
			lightEditScope1Edit,
			GafferScene.EditScopeAlgo.acquireAttributeEdit(
				s["editScope1"], "/group/light", "gl:visualiser:scale", createIfNecessary = False
			)
		)
		self.assertEqual( inspection.editWarning(), "" )
		# If there is a source node inside an edit scope, make sure we use that
		s["editScope1"]["light2"] = GafferSceneTest.TestLight()
		s["editScope1"]["light2"]["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		s["editScope1"]["light2"]["visualiserAttributes"]["scale"]["value"].setValue( 0.5 )
		s["editScope1"]["light2"]["name"].setValue( "light2" )
		s["editScope1"]["parentLight2"] = GafferScene.Parent()
		s["editScope1"]["parentLight2"]["parent"].setValue( "/" )
		s["editScope1"]["parentLight2"]["children"][0].setInput( s["editScope1"]["light2"]["out"] )
		s["editScope1"]["parentLight2"]["in"].setInput( s["editScope1"]["BoxIn"]["out"] )
		s["editScope1"]["AttributeEdits"]["in"].setInput( s["editScope1"]["parentLight2"]["out"] )
		self.__assertExpectedResult(
			self.__inspect( s["editScope2"]["out"], "/light2", "gl:visualiser:scale", s["editScope1"] ),
			source = s["editScope1"]["light2"]["visualiserAttributes"]["scale"],
			sourceType = SourceType.EditScope,
			editable = True,
			edit = s["editScope1"]["light2"]["visualiserAttributes"]["scale"]
		)
		# If there is a tweak in the scope's processor, make sure we use that
		light2Edit = GafferScene.EditScopeAlgo.acquireAttributeEdit(
			s["editScope1"], "/light2", "gl:visualiser:scale", createIfNecessary = True
		)
		light2Edit["enabled"].setValue( True )
		self.__assertExpectedResult(
			self.__inspect( s["editScope2"]["out"], "/light2", "gl:visualiser:scale", s["editScope1"] ),
			source = light2Edit,
			sourceType = SourceType.EditScope,
			editable = True,
			edit = light2Edit
		)
		# If there is a manual tweak downstream of the scope's scene processor, make sure we use that
		s["editScope1"]["tweakLight2"] = GafferScene.AttributeTweaks()
		s["editScope1"]["tweakLight2"]["in"].setInput( s["editScope1"]["AttributeEdits"]["out"] )
		s["editScope1"]["tweakLight2Filter"] = GafferScene.PathFilter()
		s["editScope1"]["tweakLight2Filter"]["paths"].setValue( IECore.StringVectorData( [ "/light2" ] ) )
		s["editScope1"]["tweakLight2"]["filter"].setInput( s["editScope1"]["tweakLight2Filter"]["out"] )
		s["editScope1"]["BoxOut"]["in"].setInput( s["editScope1"]["tweakLight2"]["out"] )
		editScopeAttributeTweak = Gaffer.TweakPlug( "gl:visualiser:scale", 4.0 )
		s["editScope1"]["tweakLight2"]["tweaks"].addChild( editScopeAttributeTweak )
		self.__assertExpectedResult(
			self.__inspect( s["editScope2"]["out"], "/light2", "gl:visualiser:scale", s["editScope1"] ),
			source = editScopeAttributeTweak,
			sourceType = SourceType.EditScope,
			editable = True,
			edit = editScopeAttributeTweak
		)
		# If there is a manual tweak outside of an edit scope, make sure we use that with no scope
		s["independentAttributeTweak"] = GafferScene.AttributeTweaks()
		s["independentAttributeTweak"]["in"].setInput( s["editScope2"]["out"] )
		s["independentAttributeTweakFilter"] = GafferScene.PathFilter()
		s["independentAttributeTweakFilter"]["paths"].setValue( IECore.StringVectorData( [ "/group/light" ] ) )
		s["independentAttributeTweak"]["filter"].setInput( s["independentAttributeTweakFilter"]["out"] )
		independentAttributeTweakPlug = Gaffer.TweakPlug( "gl:visualiser:scale", 8.0 )
		independentAttributeTweakPlug["enabled"].setValue( True )
		s["independentAttributeTweak"]["tweaks"].addChild( independentAttributeTweakPlug )
		self.__assertExpectedResult(
			self.__inspect( s["independentAttributeTweak"]["out"], "/group/light", "gl:visualiser:scale", None ),
			source = independentAttributeTweakPlug,
			sourceType = SourceType.Other,
			editable = True,
			edit = independentAttributeTweakPlug
		)
		# Check we show the last input plug if the source plug is an output
		scaleCurve = Gaffer.Animation.acquire( s["light"]["visualiserAttributes"]["scale"]["value"] )
		scaleCurve.addKey( Gaffer.Animation.Key( time = 1, value = 2 ) )
		self.__assertExpectedResult(
			self.__inspect( s["group"]["out"], "/group/light", "gl:visualiser:scale", None ),
			source = s["light"]["visualiserAttributes"]["scale"],
			sourceType = SourceType.Other,
			editable = True,
			edit = s["light"]["visualiserAttributes"]["scale"]
		)
		# Check editWarnings and nonEditableReasons
		self.__assertExpectedResult(
			self.__inspect( s["independentAttributeTweak"]["out"], "/group/light", "gl:visualiser:scale", s["editScope2"] ),
			source = independentAttributeTweakPlug,
			sourceType = SourceType.Downstream,
			editable = True,
			edit = lightEditScope2Edit,
			editWarning = "Attribute has edits downstream in independentAttributeTweak."
		)
		s["editScope2"]["enabled"].setValue( False )
		self.__assertExpectedResult(
			self.__inspect( s["independentAttributeTweak"]["out"], "/group/light", "gl:visualiser:scale", s["editScope2"] ),
			source = independentAttributeTweakPlug,
			sourceType = SourceType.Downstream,
			editable = False,
			nonEditableReason = "The target EditScope (editScope2) is disabled."
		)
		s["editScope2"]["enabled"].setValue( True )
		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"], True )
		self.__assertExpectedResult(
			self.__inspect( s["independentAttributeTweak"]["out"], "/light2", "gl:visualiser:scale", s["editScope2"] ),
			source = editScopeAttributeTweak,
			sourceType = SourceType.Upstream,
			editable = False,
			nonEditableReason = "editScope2 is locked."
		)
		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"], False )
		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"]["AttributeEdits"]["edits"], True )
		self.__assertExpectedResult(
			self.__inspect( s["independentAttributeTweak"]["out"], "/light2", "gl:visualiser:scale", s["editScope2"] ),
			source = editScopeAttributeTweak,
			sourceType = SourceType.Upstream,
			editable = False,
			nonEditableReason = "editScope2.AttributeEdits.edits is locked."
		)
		Gaffer.MetadataAlgo.setReadOnly( s["editScope2"]["AttributeEdits"], True )
		self.__assertExpectedResult(
			self.__inspect( s["independentAttributeTweak"]["out"], "/light2", "gl:visualiser:scale", s["editScope2"] ),
			source = editScopeAttributeTweak,
			sourceType = SourceType.Upstream,
			editable = False,
			nonEditableReason = "editScope2.AttributeEdits is locked."
		)
	def testAttributesWarning( self ) :
		"""Editing via a CustomAttributes node warns that other locations may be affected."""
		sphere = GafferScene.Sphere()
		sphereFilter = GafferScene.PathFilter()
		sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
		customAttributes = GafferScene.CustomAttributes()
		customAttributes["in"].setInput( sphere["out"] )
		customAttributes["filter"].setInput( sphereFilter["out"] )
		customAttributes["attributes"].addChild(
			Gaffer.NameValuePlug(
				"test:attr",
				IECore.FloatData( 1.0 ),
				Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic,
				"testPlug"
			)
		)
		self.__assertExpectedResult(
			self.__inspect( customAttributes["out"], "/sphere", "test:attr", None ),
			source = customAttributes["attributes"]["testPlug"],
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
			editable = True,
			edit = customAttributes["attributes"]["testPlug"],
			editWarning = "Edits to \"test:attr\" may affect other locations in the scene."
		)
	def testEditScopeNotInHistory( self ) :
		"""A target EditScope outside the scene history makes the result non-editable."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		lightFilter = GafferScene.PathFilter()
		lightFilter["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )
		attributeTweaks = GafferScene.AttributeTweaks()
		attributeTweaks["in"].setInput( light["out"] )
		attributeTweaks["filter"].setInput( lightFilter["out"] )
		attributeTweaks["tweaks"].addChild( Gaffer.TweakPlug( "gl:visualiser:scale", 2.0 ) )
		editScope = Gaffer.EditScope()
		editScope.setup( light["out"] )
		SourceType = GafferSceneUI.Private.Inspector.Result.SourceType
		self.__assertExpectedResult(
			self.__inspect( light["out"], "/light", "gl:visualiser:scale", editScope ),
			source = light["visualiserAttributes"]["scale"],
			sourceType = SourceType.Other,
			editable = False,
			nonEditableReason = "The target EditScope (EditScope) is not in the scene history."
		)
		self.__assertExpectedResult(
			self.__inspect( attributeTweaks["out"], "/light", "gl:visualiser:scale" ),
			source = attributeTweaks["tweaks"][0],
			sourceType = SourceType.Other,
			editable = True,
			edit = attributeTweaks["tweaks"][0]
		)
		self.__assertExpectedResult(
			self.__inspect( attributeTweaks["out"], "/light", "gl:visualiser:scale", editScope ),
			source = attributeTweaks["tweaks"][0],
			sourceType = SourceType.Other,
			editable = False,
			nonEditableReason = "The target EditScope (EditScope) is not in the scene history."
		)
	def testDisabledTweaks( self ) :
		"""Disabled tweaks are skipped and the upstream source is used instead."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		lightFilter = GafferScene.PathFilter()
		lightFilter["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )
		attributeTweaks = GafferScene.AttributeTweaks()
		attributeTweaks["in"].setInput( light["out"] )
		attributeTweaks["filter"].setInput( lightFilter["out"] )
		scaleTweak = Gaffer.TweakPlug( "gl:visualiser:scale", 2.0 )
		attributeTweaks["tweaks"].addChild( scaleTweak )
		SourceType = GafferSceneUI.Private.Inspector.Result.SourceType
		self.__assertExpectedResult(
			self.__inspect( attributeTweaks["out"], "/light", "gl:visualiser:scale" ),
			source = scaleTweak,
			sourceType = SourceType.Other,
			editable = True,
			edit = scaleTweak
		)
		scaleTweak["enabled"].setValue( False )
		self.__assertExpectedResult(
			self.__inspect( attributeTweaks["out"], "/light", "gl:visualiser:scale" ),
			source = light["visualiserAttributes"]["scale"],
			sourceType = SourceType.Other,
			editable = True,
			edit = light["visualiserAttributes"]["scale"]
		)
	def testEditScopeNesting( self ) :
		"""Edits are acquired in the correct scope when EditScopes are nested."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		editScope1 = Gaffer.EditScope( "EditScope1" )
		editScope1.setup( light["out"] )
		editScope1["in"].setInput( light["out"] )
		i = self.__inspect( editScope1["out"], "/light", "gl:visualiser:scale", editScope1 )
		scope1Edit = i.acquireEdit()
		scope1Edit["enabled"].setValue( True )
		self.assertEqual( scope1Edit.ancestor( Gaffer.EditScope ), editScope1 )
		editScope2 = Gaffer.EditScope()
		editScope2.setup( light["out"] )
		editScope1.addChild( editScope2 )
		editScope2["in"].setInput( scope1Edit.ancestor( GafferScene.SceneProcessor )["out"] )
		editScope1["BoxOut"]["in"].setInput( editScope2["out"] )
		i = self.__inspect( editScope1["out"], "/light", "gl:visualiser:scale", editScope2 )
		scope2Edit = i.acquireEdit()
		scope2Edit["enabled"].setValue( True )
		self.assertEqual( scope2Edit.ancestor( Gaffer.EditScope ), editScope2 )
		# Check we still find the edit in scope 1
		i = self.__inspect( editScope1["out"], "/light", "gl:visualiser:scale", editScope1 )
		self.assertEqual( i.acquireEdit()[0].ancestor( Gaffer.EditScope ), editScope1 )
	def testDownstreamSourceType( self ) :
		"""A tweak downstream of the target EditScope is reported as Downstream, with a warning."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		editScope = Gaffer.EditScope()
		editScope.setup( light["out"] )
		editScope["in"].setInput( light["out"] )
		lightFilter = GafferScene.PathFilter()
		lightFilter["paths"].setValue( IECore.StringVectorData( [ "/light" ] ) )
		attributeTweaks = GafferScene.AttributeTweaks()
		attributeTweaks["in"].setInput( editScope["out"] )
		attributeTweaks["filter"].setInput( lightFilter["out"] )
		scaleTweak = Gaffer.TweakPlug( "gl:visualiser:scale", 2.0 )
		attributeTweaks["tweaks"].addChild( scaleTweak )
		self.__assertExpectedResult(
			self.__inspect( attributeTweaks["out"], "/light", "gl:visualiser:scale", editScope ),
			source = scaleTweak,
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Downstream,
			editable = True,
			edit = None,
			editWarning = "Attribute has edits downstream in AttributeTweaks."
		)
	def testLightInsideBox( self ) :
		"""Attributes of a light promoted out of a Box are still editable at the source."""
		box = Gaffer.Box()
		box["light"] = GafferSceneTest.TestLight()
		box["light"]["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		Gaffer.PlugAlgo.promote( box["light"]["out"] )
		self.__assertExpectedResult(
			self.__inspect( box["out"], "/light", "gl:visualiser:scale" ),
			source = box["light"]["visualiserAttributes"]["scale"],
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
			editable = True,
			edit = box["light"]["visualiserAttributes"]["scale"]
		)
	def testDirtiedSignal( self ) :
		"""dirtiedSignal fires for relevant attribute and EditScope changes only."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		editScope1 = Gaffer.EditScope()
		editScope1.setup( light["out"] )
		editScope1["in"].setInput( light["out"] )
		editScope2 = Gaffer.EditScope()
		editScope2.setup( editScope1["out"] )
		editScope2["in"].setInput( editScope1["out"] )
		settings = Gaffer.Node()
		settings["editScope"] = Gaffer.Plug()
		inspector = GafferSceneUI.Private.AttributeInspector(
			editScope2["out"], settings["editScope"], "gl:visualiser:scale"
		)
		cs = GafferTest.CapturingSlot( inspector.dirtiedSignal() )
		# Tweaking an attribute should dirty the inspector
		light["visualiserAttributes"]["scale"]["value"].setValue( 2.0 )
		self.assertEqual( len( cs ) , 1 )
		# But tweaking the transform should not.
		light["transform"]["translate"]["x"].setValue( 10 )
		self.assertEqual( len( cs ), 1 )
		# Changing EditScope should also dirty the inspector
		settings["editScope"].setInput( editScope1["enabled"] )
		self.assertEqual( len( cs ), 2 )
		settings["editScope"].setInput( editScope2["enabled"] )
		self.assertEqual( len( cs ), 3 )
		settings["editScope"].setInput( None )
		self.assertEqual( len( cs ), 4 )
	def testNonExistentLocation( self ) :
		"""Inspecting a location that doesn't exist yields no result."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		self.assertIsNone( self.__inspect( light["out"], "/nothingHere", "gl:visualiser:scale" ) )
	def testNonExistentAttribute( self ) :
		"""Inspecting an attribute that doesn't exist yields no result."""
		light = GafferSceneTest.TestLight()
		self.assertIsNone( self.__inspect( light["out"], "/light", "bad:attribute" ) )
	def testReadOnlyMetadataSignalling( self ) :
		"""Read-only metadata changes dirty the inspector only when they affect the target EditScope."""
		light = GafferSceneTest.TestLight()
		light["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		editScope = Gaffer.EditScope()
		editScope.setup( light["out"] )
		editScope["in"].setInput( light["out"] )
		settings = Gaffer.Node()
		settings["editScope"] = Gaffer.Plug()
		inspector = GafferSceneUI.Private.AttributeInspector(
			editScope["out"], settings["editScope"], "gl:visualiser:scale"
		)
		cs = GafferTest.CapturingSlot( inspector.dirtiedSignal() )
		Gaffer.MetadataAlgo.setReadOnly( editScope, True )
		Gaffer.MetadataAlgo.setReadOnly( editScope, False )
		self.assertEqual( len( cs ), 0 ) # Changes not relevant because we're not using the EditScope.
		settings["editScope"].setInput( editScope["enabled"] )
		self.assertEqual( len( cs ), 1 )
		Gaffer.MetadataAlgo.setReadOnly( editScope, True )
		self.assertEqual( len( cs ), 2 ) # Change affects the result of `inspect().editable()`
	def testCameraAttribute( self ) :
		"""Visualiser attributes on a camera node are inspectable and editable."""
		camera = GafferScene.Camera()
		camera["visualiserAttributes"]["scale"]["enabled"].setValue( True )
		self.__assertExpectedResult(
			self.__inspect( camera["out"], "/camera", "gl:visualiser:scale", None ),
			source = camera["visualiserAttributes"]["scale"],
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
			editable = True,
			edit = camera["visualiserAttributes"]["scale"]
		)
	def testAttributes( self ) :
		"""Custom attributes added via CustomAttributes are inspectable, with a shared-edit warning."""
		sphere = GafferScene.Sphere()
		sphereFilter = GafferScene.PathFilter()
		sphereFilter["paths"].setValue( IECore.StringVectorData( [ "/sphere" ] ) )
		customAttributes = GafferScene.CustomAttributes()
		customAttributes["in"].setInput( sphere["out"] )
		customAttributes["filter"].setInput( sphereFilter["out"] )
		customAttributes["attributes"].addChild(
			Gaffer.NameValuePlug(
				"test:attr",
				IECore.FloatData( 1.0 ),
				Gaffer.Plug.Flags.Default | Gaffer.Plug.Flags.Dynamic,
				"testPlug"
			)
		)
		self.__assertExpectedResult(
			self.__inspect( customAttributes["out"], "/sphere", "test:attr", None ),
			source = customAttributes["attributes"]["testPlug"],
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
			editable = True,
			edit = customAttributes["attributes"]["testPlug"],
			editWarning = "Edits to \"test:attr\" may affect other locations in the scene."
		)
	def testDisabledAttribute( self ) :
		"""A disabled attribute is still editable via its `enabled` plug, directly and through a group."""
		light = GafferSceneTest.TestLight()
		group = GafferScene.Group()
		group["in"][0].setInput( light["out"] )
		# The value of the attribute isn't editable in this case, but the `enabled`
		# plug is, so it is considered editable.
		self.__assertExpectedResult(
			self.__inspect( light["out"], "/light", "gl:visualiser:scale", None ),
			source = light["visualiserAttributes"]["scale"],
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
			editable = True,
			edit = light["visualiserAttributes"]["scale"]
		)
		# Values should be inherited from predecessors in the history.
		self.__assertExpectedResult(
			self.__inspect( group["out"], "/group/light", "gl:visualiser:scale", None ),
			source = light["visualiserAttributes"]["scale"],
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
			editable = True,
			edit = light["visualiserAttributes"]["scale"]
		)
	def testRegisteredAttribute( self ) :
		"""Acquiring an edit for a registered attribute creates the EditScope tweak, which then becomes the source."""
		light = GafferSceneTest.TestLight()
		editScope = Gaffer.EditScope()
		editScope.setup( light["out"] )
		editScope["in"].setInput( light["out"] )
		self.__assertExpectedResult(
			self.__inspect( editScope["out"], "/light", "gl:visualiser:scale", None ),
			source = light["visualiserAttributes"]["scale"],
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.Other,
			editable = True,
			edit = light["visualiserAttributes"]["scale"]
		)
		inspection = self.__inspect( editScope["out"], "/light", "gl:visualiser:scale", editScope )
		edit = inspection.acquireEdit()
		self.assertEqual(
			edit,
			GafferScene.EditScopeAlgo.acquireAttributeEdit(
				editScope, "/light", "gl:visualiser:scale", createIfNecessary = False
			)
		)
		edit["enabled"].setValue( True )
		# With the tweak in place in `editScope`, force the history to be checked again
		# to make sure we get the right source back.
		self.__assertExpectedResult(
			self.__inspect( editScope["out"], "/light", "gl:visualiser:scale", editScope ),
			source = edit,
			sourceType = GafferSceneUI.Private.Inspector.Result.SourceType.EditScope,
			editable = True,
			edit = edit
		)
def testDontEditParentOfInspectedLocation( self ) :
    """Acquired edits must target the inspected location itself (the
    spreadsheet row must be named "/parent/child"), both when the edit is
    first created and when it is re-acquired after being disabled."""
    light = GafferSceneTest.TestLight()

    childGroup = GafferScene.Group()
    childGroup["in"][0].setInput( light["out"] )
    childGroup["name"].setValue( "child" )

    parentGroup = GafferScene.Group()
    parentGroup["in"][0].setInput( childGroup["out"] )
    parentGroup["name"].setValue( "parent" )

    editScope = Gaffer.EditScope()
    editScope.setup( parentGroup["out"] )
    editScope["in"].setInput( parentGroup["out"] )

    inspection = self.__inspect( editScope["out"], "/parent/child", "gl:visualiser:scale", editScope )
    edit = inspection.acquireEdit()
    row = edit.ancestor( Gaffer.Spreadsheet.RowPlug )
    self.assertEqual( row["name"].getValue(), "/parent/child" )

    # Disabling the edit and inspecting again must still resolve to the
    # child location, not its parent.
    edit["enabled"].setValue( False )
    inspection = self.__inspect( editScope["out"], "/parent/child", "gl:visualiser:scale", editScope )
    edit = inspection.acquireEdit()
    row = edit.ancestor( Gaffer.Spreadsheet.RowPlug )
    self.assertEqual( row["name"].getValue(), "/parent/child" )
# Allow the test module to be run directly from the command line.
if __name__ == "__main__" :
    unittest.main()
|
# API version this wrapper targets (see USER_AGENT for the client name).
API_VERSION = '1.6'
# User-Agent string sent with requests.
USER_AGENT = 'cmtt-python-wrapper'
# NOTE(review): presumably the API rate limit (max calls per time window) —
# confirm the unit against the client code that consumes it.
CALLS_LIMIT = 3
# Format string for the standard `logging` module.
LOG_FORMAT = '%(asctime)s - %(name)s - %(levelname)s - [%(filename)s:%(lineno)d] - %(message)s'
|
# Standard Library Imports
import urllib2
import re
# Core Django Imports
from django.core.management.base import BaseCommand
from django.template.defaultfilters import slugify
from django.utils.encoding import smart_str
# Third Party App Imports
import bs4
# This App Imports
from main.models import CIAWFBFieldInfo, CIAWFBEntry
import main.ciawfbgroupedfieldlist
from updatecountries import field_data_codes
class Command(BaseCommand):
    help = 'Checks field data for errors, and outputs any missing fields.'

    def handle(self, *args, **options):
        """Scrape the live CIA World Factbook field definitions and cross-check
        them against the local model fields, grouped field list and
        field_data_codes, printing an ERROR line for every mismatch. Also
        refreshes CIAWFBFieldInfo rows with the scraped name/description.

        Note: this is Python 2 code (urllib2, str.decode on bytes)."""

        def extract_field_data():
            """ Note: Requires HTML5 Library: pip install html5lib
            """
            # Collect (title, description, field-list URL) triples from the
            # notes-and-definitions page; tables missing any piece are skipped.
            fullURL = "https://www.cia.gov/library/publications/the-world-factbook/docs/notesanddefs.html"
            soup = bs4.BeautifulSoup(urllib2.urlopen(fullURL).read())
            field_data = []
            tables = soup.find_all('table', width="638", id=re.compile('^\d\d\d\d$'))
            for table in tables:
                try:
                    title = table.find('td', class_="category_titles").text.strip()
                except AttributeError:
                    continue
                try:
                    field_desc = table.find('div', class_="category_data").text.encode('utf-8').strip()
                except AttributeError:
                    continue
                try:
                    field_list_url = table.find('a', href=re.compile(r'^../fields/\d\d\d\d.html'))['href']
                except (AttributeError, TypeError):
                    continue
                field_data.append((title, field_desc, field_list_url))
            return field_data

        field_data = extract_field_data()

        # Set up a sample CIAWFB Entry to test against
        sample_CIAWFB_model = CIAWFBEntry.objects.filter(country__id=1)
        model_field_names = sample_CIAWFB_model[0]._meta.get_all_field_names()
        live_site_field_names = []
        group_list = main.ciawfbgroupedfieldlist.get_CIAWFB_group_list()
        grouped_fields = main.ciawfbgroupedfieldlist.get_CIAWFB_grouped_fields()
        for field_trio in field_data:
            # Normalise the scraped title into the slug used for model field names.
            title = smart_str(field_trio[0].strip()).decode('ascii', 'ignore')
            db_title = slugify(title).replace('-', '_').rstrip('_')
            live_site_field_names.append(db_title)
            desc = field_trio[1].strip().decode('ascii', 'ignore')
            # Verify that fields from live CIAWFB are in model
            if db_title not in model_field_names:
                print('ERROR: Field name "%s" is in the live CIAWFB site but not in models.py.' % db_title)
            # Verify that fields from live CIAWFB are in groupedfieldlist:
            field_matches = False
            for group in group_list:
                if db_title in grouped_fields[group]:
                    field_matches = True
                    break
            if field_matches is False:
                print('ERROR: Field name "%s" is in the live CIAWFB site but is not in the grouped field list.'
                      % db_title)
            # Verify that fields from live CIAWFB are in updatecountries.field_data_codes
            if db_title not in field_data_codes.keys():
                print('ERROR: Field name "%s" is in the live CIAWFB site but is not in updatecountries field_data_codes' %
                      db_title)
            # Write field name, field description to database.
            try:
                CIAWFB_order_entry = CIAWFBFieldInfo.objects.get(field_dbname=db_title)
            except CIAWFBFieldInfo.DoesNotExist:
                CIAWFB_order_entry = CIAWFBFieldInfo(field_dbname=db_title)
            CIAWFB_order_entry.field_description = desc
            CIAWFB_order_entry.field_name = title
            CIAWFB_order_entry.save()
        # Verify that fields from model are in live CIAWFB
        for field_name in model_field_names:
            if field_name not in live_site_field_names:
                # These three are bookkeeping columns, not Factbook fields.
                if field_name not in ['country', 'date_entered', 'id']:
                    print('ERROR: Field name "%s" is in models.py but is not in the live CIAWFB site.' %
                          field_name)
        # Verify that fields from grouped field list are in live CIAWFB
        for group in group_list:
            for field_name in grouped_fields[group]:
                if field_name not in live_site_field_names:
                    print('ERROR: Field name "%s" is in live CIAWFB site but is not in the grouped field list.' %
                          field_name)
        # Verify that fields from field_data_codes are in live CIAWFB
        for field_name in field_data_codes.keys():
            if field_name not in live_site_field_names:
                print('ERROR: Field name "%s" is in live CIAWFB site but is not in the field_data_codes.' %
                      field_name)
|
#Creating a racing turtle game using loops and drawing a race track
from turtle import *
from random import randint
# Draw the finish grid: 25 numbered tick marks, each with a vertical line.
speed(10)
penup()
goto(-140, 140)
for tick in range(25):
    write(tick, align='center')
    right(90)
    forward(10)
    pendown()
    forward(150)
    penup()
    backward(160)
    left(90)
    forward(20)

# Create the four racers, one per lane.
racers = []
for colour, lane_y in (('red', 100), ('blue', 70), ('green', 40), ('yellow', 10)):
    racer = Turtle()
    racer.color(colour)
    racer.shape('turtle')
    racer.penup()
    racer.goto(-160, lane_y)
    racer.pendown()
    racers.append(racer)

# Race: every turn each turtle advances a random 1-5 steps.
for _ in range(170):
    for racer in racers:
        racer.forward(randint(1, 5))
|
from aiorequest import Credentials
from tests.markers import asyncio, unit
# Mark every test in this module as a unit test running on the asyncio loop.
pytestmark = [unit, asyncio]


async def test_username(credentials: Credentials) -> None:
    # The username property is awaitable on this Credentials implementation.
    assert await credentials.username == "superuser"


async def test_password(credentials: Credentials) -> None:
    assert await credentials.password == "superpass"


async def test_as_string(credentials: Credentials) -> None:
    # as_str() renders a repr-style summary containing both fields.
    assert (
        await credentials.as_str() == "AuthCredentials(username='superuser', password='superpass')"
    )
|
#!/bin/python3
import sys
# Read one line and report whether it parses as an integer (EAFP style).
S = input()
try:
    parsed = int(S)
except ValueError:
    print("Bad String")
else:
    print(parsed)
|
from django.db import models
# Create your models here.
class NewsInfo(models.Model):
    """One news item record.

    NOTE(review): every field — including Time, Reading and Comment — is a
    CharField, presumably storing scraped text verbatim; confirm before
    converting any of them to typed fields (that would require a migration).
    """
    Title = models.CharField(max_length=100)
    Url = models.CharField(max_length=100)
    Author = models.CharField(max_length=20)
    Time = models.CharField(max_length=50)
    Reading = models.CharField(max_length=20)
    Comment = models.CharField(max_length=20)
    Classify = models.CharField(max_length=20)
    Content = models.CharField(max_length=10000)
# import the necessary packages
from enum import Enum
import numpy as np
import imutils
import cv2
import time
from Agent import Agent, AgentType
from KinematicsAgent import *
from Entities import Color, Block, Polygon
class SensorAgent(Agent):
    """Vision agent: grabs frames from a USB camera, thresholds them per
    colour and reports the detected blocks to peer agents on request."""

    def __init__(self, agentType):
        Agent.__init__(self, agentType)
        # HSV (lower, upper) threshold pairs per colour.
        # NOTE(review): presumably tuned for this camera/lighting — confirm
        # before reuse elsewhere.
        self._colorRange = {
            Color.Red: [(0, 200, 0), (3, 255, 255)],
            Color.Green: [(33, 165, 0), (70, 255, 255)],
            Color.Blue: [(100, 130, 0), (130, 255, 255)],
            Color.Yellow: [(20, 115, 180), (80, 255, 255)],
        }
        # Camera at index 1, forced to 1280x720 (properties 3/4 are the
        # frame width/height ids).
        self._camera = cv2.VideoCapture(1)
        self._camera.set(3, 1280)
        self._camera.set(4, 720)

    def AddPeerAgent(self, peerAgent):
        # Only the learning agent is remembered for later messaging.
        if peerAgent.AgentType == AgentType.LearningAgent:
            self._learningAgent = peerAgent

    def SendMessage(self, receivingAgents, content):
        Agent.SendMessage(self, receivingAgents, content)

    def ReceiveMessage(self, senderAgent, content):
        # Single understood request: detect blocks and reply with them.
        if content == "DetectBlocks":
            blocks = self.DetectBlocks()
            self.SendMessage([senderAgent], blocks)

    def Act(self):
        raise NotImplementedError

    # internal methods
    def DetectBlocks(self):
        """Poll the camera for up to 60 seconds, collecting detected blocks.

        Also stops once more than 100 blocks have accumulated; duplicates are
        removed via set() before returning."""
        timeout = time.time() + 60
        blocks = []
        i = 0
        while len(blocks) <= 100:
            mask = []
            contours = []
            if time.time() > timeout:
                break
            frame = self.GrabFrame()
            i = i + 1
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(1) & 0xFF
            # Skip the first two frames — presumably to let the camera
            # settle before detection; confirm.
            if(i >= 3):
                for color in self._colorRange:
                    mask = self.ConstructMask(frame, color)
                    contours = self.GetContours(mask)
                    blocks.extend(self.ConstructBlocks(contours, color))
        return list(set(blocks))

    def GrabFrame(self):
        # Read one frame; the `grabbed` success flag is ignored.
        (grabbed, frame) = self._camera.read()
        return frame

    def ConstructMask(self, frame, color):
        """Binary mask of pixels inside the colour's HSV range, cleaned with
        erode/dilate to drop speckle noise."""
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
        mask = cv2.inRange(hsv, self._colorRange[color][0], self._colorRange[color][1])
        mask = cv2.erode(mask, None, iterations=2)
        mask = cv2.dilate(mask, None, iterations=2)
        return mask

    def GetContours(self, mask):
        # Keep only large external contours (area >= 30000 px).
        allContours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)[-2]
        return [c for c in allContours if cv2.contourArea(c) >= 30000]

    def ConstructBlocks(self, contours, color):
        """Wrap each contour's minimum-area bounding box in a Block."""
        blocks = []
        if len(contours) > 0:
            for c in contours:
                rect = cv2.minAreaRect(c)
                box = cv2.boxPoints(rect)
                box = np.int0(box)
                blocks.append(Block(box, color))
        return blocks
#!/usr/bin/env python
# coding: utf-8
from setuptools import setup, find_packages
from io import open
# Package summary and release version.
d = 'Integrates peewee and bottle to produce JSON API compatible web services.'
version = '0.0.1'

setup(
    name='corkscrew',
    version=version,
    description=d,
    # Long description comes verbatim from the README.
    long_description=open('README.rst', 'r', encoding='utf-8').read(),
    author='Stephan Klein',
    url='https://github.com/privatwolke/corkscrew',
    license='MIT',
    packages=find_packages(),
    install_requires=['bottle', 'peewee'],
    # Extra dependencies used only by the test suite.
    extras_require={
        "testing": ['webtest', 'nose', 'nosetests-json-extended', 'coverage']
    },
    zip_safe=True
)
|
import asyncio
import json
import threading
import websockets
import secrets
import time
from .RainbowSocksClient import RainbowSocksClient
from .RainbowSocksServer import RainbowSocksServer
#loop = asyncio.get_event_loop()
#loop.run_until_complete(run_test(loop))
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
2.13 字符串连接及合并
Created on 2016年7月29日
@author: wang
'''
# NOTE(review): `parts` is a set literal, so the join order below is
# arbitrary (and 'IS' is capitalised oddly) — presumably a demo artefact.
parts = {'IS', 'Chicago', 'Not', 'Chicago?'}
print ' '.join(parts)
print ','.join(parts)
print ''.join(parts)

a = 'Is Chicago'
b = 'Not Chicago?'
print a + ' ' + b
print('{} {}'.format(a,b))
print(a + ' ' + b)

# Adjacent string literals are concatenated at compile time.
a = 'Hello' 'World'
print a

'''
s = ' '
for p in parts:
    s += p
print s
'''

# Joining non-string values: convert each element with str() first.
data = ['ACME', 50 , 91.1]
print ','.join(str(d) for d in data)

'''
print(a + ':' + b + ':' + c)
print(':'.join([a,b,c]))
print(a, b, c, sep=':')
'''
def sample():
    """Yield the example words one at a time (generator form of the parts)."""
    for word in ('Is', 'Chicago', 'Not', 'Chicago?'):
        yield word
# Feed the generator straight into join — no intermediate list needed.
text = ''.join(sample())
print text

'''
for part in sample():
    f.write(part)
'''
def combine(source, maxsize):
    """Lazily join strings from `source` into chunks.

    Parts are accumulated until their combined length exceeds `maxsize`,
    at which point one joined chunk is yielded and accumulation restarts.
    Any remainder is yielded at the end.
    """
    parts = []
    size = 0
    for part in source:
        parts.append(part)
        size += len(part)
        if size > maxsize:
            yield ''.join(parts)
            parts = []
            size = 0
    # Flush the remainder. The guard fixes a bug in the original, which
    # yielded a spurious empty chunk when the input ended exactly on a flush.
    if parts:
        yield ''.join(parts)
'''
for part in combine(sample(), 32768)
f.write(part)
'''
|
from datetime import datetime as dt
from dateutil.relativedelta import relativedelta
import numpy as np
import pandas as pd
# Train/test date-interval folds; bounds are inclusive "YYYY-MM-DD" strings.
# NOTE(review): in the first fold the train range ending 2020-06-30 overlaps
# the test range starting 2020-06-01 — confirm this is intentional.
FOLDS = [
    {
        "train": [
            ("2005-01-01", "2015-12-31"),
            ("2018-01-01", "2020-06-30"),
        ],
        "test": [
            ("2020-06-01", "2020-12-31"),
        ]
    },
    {
        "train": [
            ("2005-01-01", "2020-05-31"),
        ],
        "test": [
            ("2020-06-01", "2020-12-31"),
        ]
    },
]
def create_folds_seq_with_year(df, intervals, n_months=3, n_years=2, city=None, use_year=False, month_poly_degree=None, other_filters=None):
    """Build lag features (X), monthly counts (Y) and years from a dataframe.

    df must contain YEAR, MONTH (full English month names) and DTNASC columns
    (plus MUNNAME when `city` is given). For every month inside one of
    `intervals` (inclusive "YYYY-MM-DD" string bounds) the feature vector is:
    [year-2000 if use_year] + counts of the previous `n_months` months +
    counts of the same month in the previous `n_years` years + either
    polynomial month terms (month_poly_degree) or a 12-way month one-hot.
    Months whose lag counts are missing from the data are skipped.

    Returns (X, Y, years) as numpy arrays; Y has shape (n, 1).
    """
    df = df.copy()
    if city:
        df = df[df.MUNNAME == city]
    if other_filters:
        for col, value in other_filters.items():
            if isinstance(value, list):
                df = df[df[col].apply(lambda x: x in value)]
            else:
                df = df[df[col] == value]
    # One row per (year, month): count() turns every remaining column into a
    # record count; the DTNASC column is the one read back below.
    df = df.groupby(["YEAR", "MONTH"]).count().sort_values(by=["YEAR", "MONTH"]).reset_index()
    df.MONTH = df.MONTH.apply(lambda d: dt.strptime(d, "%B").month)
    df["DATE"] = df[["YEAR", "MONTH"]].apply(lambda x: dt(x[0], x[1], 1), axis=1)
    df = df[["YEAR", "MONTH", "DATE", "DTNASC"]]
    X, Y, years = list(), list(), list()
    for start, end in intervals:
        start_date = dt.strptime(start, "%Y-%m-%d")
        end_date = dt.strptime(end, "%Y-%m-%d")
        for _, (year, month, date, cnt) in df.iterrows():
            date = dt(year, month, 1)
            # BUG FIX: the original `not (date >= start_date) and (date <= end_date)`
            # only skipped months *before* the interval, so months after
            # end_date leaked into the fold. Skip anything outside [start, end].
            if not (start_date <= date <= end_date):
                continue
            features = list()
            if use_year:
                features.append(year - 2000)
            missing_date = False
            # Lag counts for the previous n_months calendar months.
            # (pd.DateOffset replaces relativedelta — identical arithmetic for
            # day-1 dates, and drops the dateutil dependency from this path.)
            for i in range(n_months):
                query_date = date - pd.DateOffset(months=i + 1)
                query_df = df[df.DATE == query_date]
                if len(query_df) == 0:
                    missing_date = True
                    break
                features.append(query_df["DTNASC"].item())
            # Lag counts for the same month in the previous n_years years.
            if not missing_date:
                for i in range(n_years):
                    query_date = date - pd.DateOffset(years=i + 1)
                    query_df = df[df.DATE == query_date]
                    if len(query_df) == 0:
                        missing_date = True
                        break
                    features.append(query_df["DTNASC"].item())
            if missing_date:
                continue
            if month_poly_degree:
                # NOTE(review): range(1, degree) yields month^1..month^(degree-1),
                # i.e. degree-1 terms — confirm the off-by-one is intended.
                month_features = [month ** i for i in range(1, month_poly_degree)]
            else:
                month_features = [0] * 12
                month_features[month - 1] = 1
            features.extend(month_features)
            X.append(features)
            Y.append(cnt)
            years.append(year)
    X, Y, years = np.stack(X), np.array(Y).reshape(-1, 1), np.array(years)
    return X, Y, years
|
import os
# Shared accumulator: repeated calls keep appending to this list.
all_file = []


def get_all_files(path):
    """Recursively collect every file and directory path under `path`.

    Results accumulate in the module-level `all_file` list, which is also
    returned for convenience. Directories are appended after their contents.
    """
    for entry in os.listdir(path):
        entry_path = os.path.join(path, entry)
        if os.path.isdir(entry_path):
            get_all_files(entry_path)
        all_file.append(entry_path)
    return all_file
# Variant using os.walk()
def getallfiles(path):
    """Walk `path`, appending every directory and file path to the shared
    module-level `all_file` list; returns that list."""
    for dirpath, dirnames, filenames in os.walk(path):
        for name in dirnames:
            all_file.append(os.path.join(dirpath, name))
        for name in filenames:
            all_file.append(os.path.join(dirpath, name))
    return all_file
if __name__ == '__main__':
    # NOTE(review): Windows path with non-ASCII folder name; the unescaped
    # backslash is harmless here only because '\资' is not a recognised
    # escape — a raw string would be safer.
    path = 'e:\资料'
    all_file = get_all_files(path)
    for item in all_file:
        print(item)
import numpy as np
import matplotlib.pyplot as plt
# autor: backup_python.dev
# Archimedean spiral r = 5 + 50*theta over one turn (linspace default of
# 50 samples).
theta = np.linspace(0,2*np.pi)
r = 5 + 50*theta

plt.style.use('Solarize_Light2')
plt.figure(figsize=(10, 6), dpi=90)

#theta = np.linspace(0,2*np.pi,1000)
#r = 4**2*np.sin(2*theta)

# Plot on polar axes and display interactively.
plt.subplot(111, projection="polar")
plt.plot(theta,r,color="red")
plt.title("GRÁFICAS EN COORDENADAS POLARES",
          color='blue',fontsize=12)
plt.show()
# my solution
def reverse(self, x: int) -> int:
    """Reverse the decimal digits of x, preserving the sign.

    Returns 0 when the reversed value falls outside the signed 32-bit
    range [-2**31, 2**31 - 1] (LeetCode "Reverse Integer" contract).
    """
    sign = -1 if x < 0 else 1
    result = sign * int(str(abs(x))[::-1])
    # BUG FIX: the original compared the unsigned magnitude against
    # 2**31 - 1 before applying the sign, wrongly rejecting the valid
    # result -2**31 (whose magnitude is one larger than the positive max).
    if not -2**31 <= result <= 2**31 - 1:
        return 0
    return result
from django.db import models
# Create your models here.
class User(models.Model):
    """Minimal contact record for a site user."""
    user_name = models.CharField(max_length=20)
    user_email = models.EmailField(max_length=50)
    # NOTE(review): IntegerField drops leading zeros and limits length for
    # phone numbers — a CharField is usually safer; changing it would need
    # a migration, so confirm first.
    user_phone = models.IntegerField()

    def __str__(self):
        # Shown in the admin and shell.
        return self.user_name
import moviepy.editor as mpy
from tensorflow.python.keras import activations
from tensorflow.python.keras import backend as K
import tensorflow as tf
import cv2
from matplotlib import cm
try:
from vis.utils import utils
except:
raise Exception("Please install keras-vis: pip install git+https://github.com/autorope/keras-vis.git")
import donkeycar as dk
from donkeycar.parts.tub_v2 import Tub
from donkeycar.utils import *
# Degrees-to-radians conversion factor used by the overlay drawing code.
DEG_TO_RAD = math.pi / 180.0


class MakeMovie(object):
    """Renders a movie from a donkeycar Tub, optionally overlaying the user
    input, the model prediction, the categorical steering distribution and a
    salient-activation heat map on every frame."""

    def run(self, args, parser):
        '''
        Load the images from a tub and create a movie from them.
        Movie
        '''
        if args.tub is None:
            print("ERR>> --tub argument missing.")
            parser.print_help()
            return

        conf = os.path.expanduser(args.config)
        if not os.path.exists(conf):
            print("No config file at location: %s. Add --config to specify\
 location or run from dir containing config.py." % conf)
            return

        self.cfg = dk.load_config(conf)

        if args.type is None and args.model is not None:
            args.type = self.cfg.DEFAULT_MODEL_TYPE
            print("Model type not provided. Using default model type from config file")

        if args.salient:
            if args.model is None:
                print("ERR>> salient visualization requires a model. Pass with the --model arg.")
                parser.print_help()
                # BUG FIX: the original fell through here and crashed later
                # trying to use a model that was never supplied.
                return

            if args.type not in ['linear', 'categorical']:
                print("Model type {} is not supported. Only linear or categorical is supported for salient visualization".format(args.type))
                parser.print_help()
                return

        self.model_type = args.type
        self.tub = Tub(args.tub)
        start = args.start
        self.end_index = args.end if args.end != -1 else len(self.tub)
        num_frames = self.end_index - start

        # Move to the correct offset
        self.current = 0
        self.iterator = self.tub.__iter__()
        while self.current < start:
            self.iterator.next()
            self.current += 1

        self.scale = args.scale
        self.keras_part = None
        self.do_salient = False
        self.user = args.draw_user_input
        if args.model is not None:
            self.keras_part = get_model_by_type(args.type, cfg=self.cfg)
            self.keras_part.load(args.model)
            if args.salient:
                self.do_salient = self.init_salient(self.keras_part.interpreter.model)

        print('making movie', args.out, 'from', num_frames, 'images')
        clip = mpy.VideoClip(self.make_frame, duration=((num_frames - 1) / self.cfg.DRIVE_LOOP_HZ))
        clip.write_videofile(args.out, fps=self.cfg.DRIVE_LOOP_HZ)

    @staticmethod
    def draw_line_into_image(angle, throttle, is_left, img, color):
        """Draw one steering/throttle indicator line onto img in-place.

        The line starts at the bottom-centre (nudged one pixel left/right
        depending on is_left) and its direction/length encode angle/throttle.
        """
        import cv2

        height = img.shape[0]
        width = img.shape[1]
        length = height
        a1 = angle * 45.0
        l1 = throttle * length
        mid = width // 2 + (- 1 if is_left else +1)

        p1 = tuple((mid - 2, height - 1))
        p11 = tuple((int(p1[0] + l1 * math.cos((a1 + 270.0) * DEG_TO_RAD)),
                     int(p1[1] + l1 * math.sin((a1 + 270.0) * DEG_TO_RAD))))

        cv2.line(img, p1, p11, color, 2)

    def draw_user_input(self, record, img, img_drawon):
        """
        Draw the user input as a green line on the image
        """
        user_angle = float(record["user/angle"])
        user_throttle = float(record["user/throttle"])
        green = (0, 255, 0)
        self.draw_line_into_image(user_angle, user_throttle, False,
                                  img_drawon, green)

    def draw_model_prediction(self, img, img_drawon):
        """
        query the model for it's prediction, draw the predictions
        as a blue line on the image
        """
        if self.keras_part is None:
            return

        expected = tuple(self.keras_part.get_input_shape('img_in')[1:])
        actual = img.shape

        # if model expects grey-scale but got rgb, covert
        if expected[2] == 1 and actual[2] == 3:
            # normalize image before grey conversion
            grey_img = rgb2gray(img)
            actual = grey_img.shape
            img = grey_img.reshape(grey_img.shape + (1,))

        if expected != actual:
            print(f"expected input dim {expected} didn't match actual dim "
                  f"{actual}")
            return

        blue = (0, 0, 255)
        pilot_angle, pilot_throttle = self.keras_part.run(img)
        self.draw_line_into_image(pilot_angle, pilot_throttle, True, img_drawon, blue)

    def draw_steering_distribution(self, img, img_drawon):
        """
        query the model for it's prediction, draw the distribution of
        steering choices, only for model type of Keras Categorical
        """
        from donkeycar.parts.keras import KerasCategorical

        if self.keras_part is None or type(self.keras_part) is not KerasCategorical:
            return

        pred_img = normalize_image(img)
        angle_binned, _ = self.keras_part.interpreter.predict(pred_img, other_arr=None)

        # One vertical bar per steering bin; the argmax bin is drawn in red.
        x = 4
        dx = 4
        y = 120 - 4
        iArgMax = np.argmax(angle_binned)
        for i in range(15):
            p1 = (x, y)
            p2 = (x, y - int(angle_binned[i] * 100.0))
            if i == iArgMax:
                cv2.line(img_drawon, p1, p2, (255, 0, 0), 2)
            else:
                cv2.line(img_drawon, p1, p2, (200, 200, 200), 2)
            x += dx

    def init_salient(self, model):
        """Prepare the salient-visualization model; returns True on success."""
        # Utility to search for layer index by name.
        # Alternatively we can specify this as -1 since it corresponds to the last layer.
        output_name = []
        layer_idx = []
        for i, layer in enumerate(model.layers):
            if "dropout" not in layer.name.lower() and "out" in layer.name.lower():
                output_name.append(layer.name)
                layer_idx.append(i)

        # BUG FIX: the original tested `output_name is []`, which is an
        # identity check against a fresh list literal and is always False.
        if not output_name:
            print("Failed to find the model layer named with 'out'. Skipping salient.")
            return False

        print("####################")
        print("Visualizing activations on layer:", output_name)
        print("####################")

        # ensure we have linear activation
        for li in layer_idx:
            model.layers[li].activation = activations.linear

        # build salient model and optimizer
        sal_model = utils.apply_modifications(model)
        self.sal_model = sal_model
        return True

    def compute_visualisation_mask(self, img):
        """Gradient-based saliency: per-pixel root-sum-of-squares of the
        output gradients w.r.t. the input image, normalized to [0, 1]."""
        img = img.reshape((1,) + img.shape)
        images = tf.Variable(img, dtype=float)

        if self.model_type == 'linear':
            with tf.GradientTape(persistent=True) as tape:
                tape.watch(images)
                pred_list = self.sal_model(images, training=False)
        elif self.model_type == 'categorical':
            with tf.GradientTape(persistent=True) as tape:
                tape.watch(images)
                pred = self.sal_model(images, training=False)
                # Only the winning bin of each categorical head contributes.
                pred_list = []
                for p in pred:
                    maxindex = tf.math.argmax(p[0])
                    pred_list.append(p[0][maxindex])

        grads = 0
        for p in pred_list:
            grad = tape.gradient(p, images)
            grads += tf.math.square(grad)
        grads = tf.math.sqrt(grads)
        channel_idx = 1 if K.image_data_format() == 'channels_first' else -1
        grads = np.sum(grads, axis=channel_idx)
        res = utils.normalize(grads)[0]
        return res

    def draw_salient(self, img):
        """Blend the saliency heat map (inferno colormap) over the frame."""
        alpha = 0.004
        beta = 1.0 - alpha
        expected = self.keras_part.interpreter.model.inputs[0].shape[1:]
        actual = img.shape

        # check input depth and convert to grey to match expected model input
        if expected[2] == 1 and actual[2] == 3:
            grey_img = rgb2gray(img)
            img = grey_img.reshape(grey_img.shape + (1,))

        norm_img = normalize_image(img)
        salient_mask = self.compute_visualisation_mask(norm_img)
        salient_mask_stacked = cm.inferno(salient_mask)[:,:,0:3]
        salient_mask_stacked = cv2.GaussianBlur(salient_mask_stacked,(3,3),cv2.BORDER_DEFAULT)
        blend = cv2.addWeighted(img.astype('float32'), alpha, salient_mask_stacked.astype('float32'), beta, 0)
        return blend

    def make_frame(self, t):
        '''
        Callback to return an image from our tub records.
        This is called from the VideoClip as it references a time.
        We don't use t to reference the frame, but instead increment
        a frame counter. This assumes sequential access.
        '''
        if self.current >= self.end_index:
            return None

        rec = self.iterator.next()
        img_path = os.path.join(self.tub.images_base_path, rec['cam/image_array'])
        image_input = img_to_arr(Image.open(img_path))
        image = image_input

        if self.do_salient:
            image = self.draw_salient(image_input)
            image = cv2.normalize(src=image, dst=None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_8U)

        if self.user: self.draw_user_input(rec, image_input, image)
        if self.keras_part is not None:
            self.draw_model_prediction(image_input, image)
            self.draw_steering_distribution(image_input, image)

        if self.scale != 1:
            # NOTE(review): cv2.resize requires integer dsize — confirm
            # args.scale is always an int.
            h, w, d = image.shape
            dsize = (w * self.scale, h * self.scale)
            image = cv2.resize(image, dsize=dsize, interpolation=cv2.INTER_LINEAR)
            image = cv2.GaussianBlur(image,(3,3),cv2.BORDER_DEFAULT)

        self.current += 1
        # returns a 8-bit RGB array
        return image
|
import bisect
from collections import Counter
def binary_in(arr, el, start):
    """Return True if `el` occurs in the sorted list `arr` at index >= start."""
    idx = bisect.bisect_left(arr, el, start)
    if idx >= len(arr):
        return False
    return arr[idx] == el
class Solution(object):
def threeSum(self, nums):
"""
:type nums: List[int]
:rtype: List[List[int]]
"""
nums.sort()
ans = set()
n = len(nums)
for i in xrange(n):
hash_table = Counter(nums[i:])
for j in xrange(i + 1, n):
hash_table.subtract(nums[j:j+1])
val_k = -nums[i] - nums[j]
if hash_table[val_k]:
ans.add((nums[i], nums[j], val_k))
return map(list, list(ans))
# Quick manual check (Python 2 print statement).
A = Solution()
S = [-1, 0, 1, 2, -1, -4]
print A.threeSum(S)
|
from rest_framework.routers import DefaultRouter
from . import viewsets
# DefaultRouter wires each viewset to standard list/detail routes under its
# URL prefix and exposes an API root view.
router = DefaultRouter()
router.register(
    r"topics",
    viewsets.TopicViewSet,
    basename="topics"
)
router.register(
    r"evaluations",
    viewsets.EvaluationViewSet,
    basename="evaluations"
)
router.register(
    r"solutions",
    viewsets.SolutionViewSet,
    basename="solutions"
)

# Generated URL patterns, consumed by Django's include().
urlpatterns = router.urls
|
# level 2 solving strategies
# functions here:
# from level2 import Naked_Multiple,Hidden_Multiple,Lines_2,NT,NT_chains,Y_Wing,
def Naked_Multiple(unit):
    """Eliminate naked multiples (sizes 3-7) from one unit.

    `unit` holds 9 entries: solved cells as ints, unsolved cells as candidate
    strings. When `size` cells share an identical `size`-long candidate
    string, those candidates are removed from every other unsolved cell.
    Returns [unit, found]; returns immediately after the first removal.
    """
    changed = False
    for size in range(3, 8):
        handled = []
        for idx in range(len(unit)):
            cell = unit[idx]
            if type(cell) != str:
                continue
            if len(cell) != size or idx in handled:
                continue
            if unit.count(cell) != size:
                continue
            members = [k for k in range(len(unit)) if unit[k] == cell]
            digits = [d for d in cell]
            handled.extend(members)
            for k in range(len(unit)):
                if type(unit[k]) == str and k not in members:
                    for digit in digits:
                        if digit in unit[k]:
                            changed = True
                            unit[k] = unit[k].replace(digit, '')
            if changed:
                return [unit, changed]
    return [unit, changed]
def Hidden_Multiple(unit):
    """Apply the hidden-multiple rule (sizes 3-7) to one unit.

    If `size` digits are each confined to the same `size` cells, those cells
    can be reduced to exactly that digit combination. Solved cells are ints,
    unsolved cells candidate strings. Returns [unit, found]; returns after
    the first size that produced a change.
    """
    changed = False
    for size in range(3, 8):
        # positions[d] = indexes of unsolved cells whose candidates include digit d
        positions = [None]
        for digit in range(1, 10):
            positions.append([])
            if digit in unit:
                # digit is already placed in this unit (solved cells are ints)
                continue
            mark = str(digit)
            for idx in range(len(unit)):
                if type(unit[idx]) == str and mark in unit[idx]:
                    positions[-1].append(idx)
        for digit in range(1, len(positions)):
            spots = positions[digit]
            if len(spots) != size or positions.count(spots) != size:
                continue
            # All digits sharing exactly these cells form the hidden multiple.
            combined = ''.join(str(d) for d in range(len(positions)) if positions[d] == spots)
            for idx in spots:
                if unit[idx] != combined:
                    changed = True
                    unit[idx] = combined
        if changed:
            return [unit, changed]
    return [unit, changed]
def Lines_2(grid):
    """Paired-lines elimination over the whole grid.

    For each digit: if two rows contain the digit's candidates in exactly the
    same two columns, the digit can be removed from those columns everywhere
    outside the pair (and symmetrically for two columns sharing two rows).
    grid is 9x9, ints for solved cells and candidate strings otherwise.
    Returns [grid, found].
    """
    found = False
    for num in range(1, 10):
        num = str(num)
        rows = []   # rows[x] = columns in row x whose candidates include num
        cols = []   # cols[x] = rows in column x whose candidates include num
        for x in range(9):
            rows.append([y for y in range(9) if (type(grid[x][y]) == str and num in grid[x][y])])
            cols.append([y for y in range(9) if (type(grid[y][x]) == str and num in grid[y][x])])
        # Pairs of rows (columns) sharing an identical two-entry pattern.
        rowind = []
        for r in rows:
            if len(r) == 2 and rows.count(r) == 2:
                s = [i for i in range(len(rows)) if rows[i] == r]
                if s not in rowind:
                    rowind.append(s)
        colind = []
        for c in cols:
            if len(c) == 2 and cols.count(c) == 2:
                s = [i for i in range(len(cols)) if cols[i] == c]
                if s not in colind:
                    colind.append(s)
        for pair in rowind:
            keep = []
            affected_cols = []
            for x in pair:
                for y in rows[x]:
                    if [x, y] not in keep:
                        keep.append([x, y])
                        affected_cols.append(y)
            for y in affected_cols:
                for x in range(9):
                    if [x, y] not in keep:
                        if type(grid[x][y]) == str and num in grid[x][y]:
                            found = True
                            grid[x][y] = grid[x][y].replace(num, '')
        # BUG FIX: the original column branch iterated `rows[x]` with a stale
        # `x` and swept `c` left over from the row branch; mirror the row
        # logic properly for columns instead.
        for pair in colind:
            keep = []
            affected_rows = []
            for col in pair:
                for row in cols[col]:
                    if [row, col] not in keep:
                        keep.append([row, col])
                        affected_rows.append(row)
            for x in affected_rows:
                for y in range(9):
                    if [x, y] not in keep:
                        if type(grid[x][y]) == str and num in grid[x][y]:
                            found = True
                            grid[x][y] = grid[x][y].replace(num, '')
    return [grid, found]
def NT(grid):
    """Collect every naked-twin cell (exactly two candidates) from the grid.

    Returns a list of [value, row, col, box] entries in row-major order,
    where box is a two-character 'RC' id of the cell's 3x3 square.
    """
    twins = []
    for row in range(9):
        for col in range(9):
            cell = grid[row][col]
            if type(cell) == str and len(cell) == 2:
                box = str(int(row / 3)) + str(int(col / 3))
                twins.append([cell, row, col, box])
    return twins  # [ [value, x, y, sqr], ... ]
def NT_chains(nakedtwos = []):
    """For each naked-twin cell, list the other twins it can chain with.

    A twin chains with another when they share at least one candidate digit
    and sit in the same row, column or box. Each link is
    [other_index, shares_first_digit, shares_second_digit, same_row,
    same_col, same_box]; a twin already linked is not revisited.
    """
    chains = []
    for idx in range(len(nakedtwos)):
        seen = [idx]
        base = nakedtwos[idx]
        links = []
        for jdx in range(len(nakedtwos)):
            if jdx in seen:
                continue
            other = nakedtwos[jdx]
            shares_first = base[0][0] in other[0]
            shares_second = base[0][1] in other[0]
            same_row = base[1] == other[1]
            same_col = base[2] == other[2]
            same_box = base[3] == other[3]
            if (shares_first or shares_second) and (same_row or same_col or same_box):
                seen.append(jdx)
                links.append([jdx, shares_first, shares_second, same_row, same_col, same_box])
        chains.append(links)
    return chains
def Y_Wing(grid):
    """Y-Wing elimination strategy.

    Uses NT()/NT_chains() to find a pivot two-candidate cell with two linked
    "wing" cells that together span exactly three candidate digits; the digit
    the wings share (but the pivot lacks) can then be removed from the cells
    both wings see. Returns [grid, found].
    """
    found = False
    nakedtwos = NT(grid)
    if not nakedtwos:
        return [grid, found]
    chains = NT_chains(nakedtwos)
    for c in range(len(chains)):
        cc = chains[c]
        if len(cc) > 1:
            for cell in cc:
                for cell2 in cc:
                    if cell != cell2:
                        # The wings must share *different* digits with the pivot.
                        difval = (cell[1] == cell2[2])
                        # At least one row/col/box pairing between the wings.
                        rowcol = ((cell[3] and cell2[4]) or (cell[4] and cell2[3]))
                        rowsqr = ((cell[3] and cell2[5]) or (cell[5] and cell2[3]))
                        colsqr = ((cell[4] and cell2[5]) or (cell[5] and cell2[4]))
                        if difval and (rowcol or rowsqr or colsqr):
                            indexes = [c, cell[0], cell2[0]]
                            # Pivot + wings must cover exactly three digits.
                            vals = []
                            for index in indexes:
                                for v in nakedtwos[index][0]:
                                    vals.append(v)
                            vals = set(vals)
                            if len(vals) == 3:
                                # `cant` = the digit both wings share but the pivot lacks.
                                cant = [v for v in vals if v not in nakedtwos[c][0]]
                                cant = cant[0]
                                if rowcol:
                                    # Row/column wings: eliminate at the single
                                    # intersection cell of the two lines.
                                    if cell[3] and cell2[4]:
                                        xx = 1
                                        yy = 2
                                    else:
                                        xx = 2
                                        yy = 1
                                    newy = nakedtwos[indexes[xx]][yy]
                                    newx = nakedtwos[indexes[yy]][xx]
                                    if type(grid[newx][newy]) == str:
                                        if cant in grid[newx][newy]:
                                            found = True
                                            grid[newx][newy] = grid[newx][newy].replace(cant, '')
                                else:  # one wing shares the pivot's 3x3 box
                                    checkxy = []
                                    if cell[-1]:
                                        sq = indexes[1]
                                        ot = indexes[2]
                                        ot2 = cell2
                                    else:
                                        sq = indexes[2]
                                        ot = indexes[1]
                                        ot2 = cell
                                    if ot2[3]:  # non-box wing shares the pivot's row
                                        xx = nakedtwos[ot][1]
                                        yy = nakedtwos[sq][2]
                                        checkxy.append([xx, yy])
                                        xx = nakedtwos[sq][1]
                                        mody = int(nakedtwos[ot][3][1])
                                        for y in range(mody * 3, (mody + 1) * 3):
                                            checkxy.append([xx, y])
                                    else:  # non-box wing shares the pivot's column
                                        yy = nakedtwos[ot][2]
                                        xx = nakedtwos[sq][1]
                                        checkxy.append([xx, yy])
                                        yy = nakedtwos[sq][2]
                                        # BUG FIX: was `int(nakedtwos[ot])[3][0]`,
                                        # which calls int() on a list and raises
                                        # TypeError whenever this branch runs.
                                        modx = int(nakedtwos[ot][3][0])
                                        # BUG FIX: was appending the same fixed
                                        # [xx, yy] cell three times; sweep the
                                        # box rows like the row branch sweeps
                                        # its box columns.
                                        for x in range(modx * 3, (modx + 1) * 3):
                                            checkxy.append([x, yy])
                                    for coord in checkxy:
                                        x = coord[0]
                                        y = coord[1]
                                        if type(grid[x][y]) == str:
                                            if cant in grid[x][y]:
                                                found = True
                                                grid[x][y] = grid[x][y].replace(cant, '')
    return [grid, found]
|
#using memoization
def count_ways(n):
    """Number of ways to climb n steps taking 1, 2 or 3 steps at a time,
    tabulated bottom-up (dp[i] = dp[i-3] + dp[i-2] + dp[i-1])."""
    dp = [0, 1, 2, 4, 7]
    if n < 5:
        return dp[n]
    # BUG FIX: the original wrote `for i in (5, n)`, iterating the 2-tuple
    # (5, n) instead of range(5, n + 1); it also kept running (and printed
    # a second answer) after handling the n < 5 case.
    for i in range(5, n + 1):
        dp.append(dp[i - 3] + dp[i - 2] + dp[i - 1])
    return dp[n]


if __name__ == '__main__':
    n = int(input())
    print(count_ways(n))
import sys

# Running tallies of how many cards of each type letter have been seen.
cardTypes = {"T": 0, "C": 0, "G": 0}
for i in sys.stdin:
    cardsInput = i.strip("\n")
    result = 0
    # Count occurrences of each card type in the line.
    for card in cardsInput:
        cardTypes[card] += 1
    # Each group of k equal cards contributes k*k points.
    for card in cardTypes:
        totalCardsType = cardTypes[card]
        result += totalCardsType*totalCardsType
    zeroLess = False
    # Repeatedly remove one card of every type; each complete set of all
    # three types adds a 7-point bonus.
    while True:
        for card in cardTypes:
            cardTypes[card] -= 1
            if cardTypes[card] < 0:
                zeroLess = True
                break
        if zeroLess:
            break
        else:
            result += 7
    print(result)
    # NOTE(review): exit() here means only the first stdin line is ever
    # processed, and cardTypes is never reset between lines — confirm intent.
    exit()
|
from collections import defaultdict, deque
# Tiny fast-input helpers (competitive-programming style):
# rr = raw line, rri = single int, rrm = list of ints on one line.
rr = lambda: input()
rri = lambda: int(input())
rrm = lambda: list(map(int, input().split()))
# Unused here, kept for template parity.
INF=float('inf')
def solve(N,K,B):
    """Minimum total reading time so Alice and Bob each like >= K chosen books.

    B is a list of (time, alice_likes, bob_likes) tuples. Greedy: at each
    step take either the cheapest both-liked book or the cheapest
    alice-only + bob-only pair, whichever costs less. Returns the minimal
    total time, or -1 when no valid selection exists.
    """
    piles = defaultdict(list)  # 'a' alice-only, 'b' bob-only, 'c' both
    for cost, alice, bob in B:
        if alice and bob:
            piles['c'].append(cost)
        elif alice:
            piles['a'].append(cost)
        elif bob:
            piles['b'].append(cost)
    # Descending order so .pop() always yields the cheapest remaining book.
    for key in piles:
        piles[key] = sorted(piles[key], reverse=True)
    count_a = count_b = 0
    total = 0
    while piles['a'] or piles['b'] or piles['c']:
        if not piles['a'] or not piles['b']:
            # Can no longer pair singles; only both-liked books help.
            if not piles['c']:
                break
            total += piles['c'].pop()
        elif not piles['c'] or piles['a'][-1] + piles['b'][-1] < piles['c'][-1]:
            total += piles['a'].pop()
            total += piles['b'].pop()
        else:
            total += piles['c'].pop()
        count_a += 1
        count_b += 1
        if count_a == K and count_b == K:
            break
    if count_a < K or count_b < K:
        return -1
    return total
# Read n and k, then the n (time, alice, bob) book rows from stdin.
n,k = rrm()
books = [] # tuples (time, alice likes it, bob likes it)
for _ in range(n):
    time,a,b=rrm()
    books.append((time,a,b))
print(solve(n,k,books))
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 24 23:21:25 2019
@author: trosales
"""
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.neighbors import KNeighborsRegressor
import pandas as pd
#import matplotlib.pyplot as plt
#Assign03
"""
FIRST DATASET
: File: car.csv
: Car_data
"""
# read in the three data files using pandas
# read in car.csv
names = ["buying","maint","doors","persons","lug_boot","safety", "status"]
car_data = pd.read_csv('car.csv', header=None, names=names)

# Giving string data numeric value: encode each text column as pandas
# categorical codes in a parallel *_cat column.
car_data.buying = car_data.buying.astype('category')
car_data['buying_cat'] = car_data.buying.cat.codes
car_data.maint = car_data.maint.astype('category')
car_data['maint_cat'] = car_data.maint.cat.codes
car_data.lug_boot = car_data.lug_boot.astype('category')
car_data['lug_boot_cat'] = car_data.lug_boot.cat.codes
car_data.safety = car_data.safety.astype('category')
car_data['safety_cat'] = car_data.safety.cat.codes

# Map the open-ended "5more"/"more" labels to 5 so the columns are numeric.
cleanup = {"doors": {"5more":5},
           "persons": {"more":5}}
car_data.replace(cleanup, inplace=True)

#car_data = car_data.drop(columns=['buying', 'maint', 'lug_boot', 'safety'])
car_data = car_data[['doors', 'persons', 'buying_cat', 'maint_cat', 'lug_boot_cat',
                     'safety_cat', 'status']]

# Target: the encoded acceptability status column.
car_data.status = car_data.status.astype('category')
car_data['status_cat'] = car_data.status.cat.codes
car_target = car_data[['status_cat']]
car_data = car_data.drop(columns=['status', 'status_cat'])

#convert dataframe to numpy array
car_data = car_data.values
#.values to convert to numpy Array, ravel to convert to a 1d array
car_target = car_target.values.ravel()

#split up the data
data_train, data_test, target_train, target_test = train_test_split(
        car_data, car_target, test_size=0.30)

classifier = KNeighborsClassifier(n_neighbors=5)
classifier.fit(data_train, target_train)
predictions = classifier.predict(data_test)

# Count correct classifications against the held-out targets.
results = (predictions == target_test)
correct = 0
for i in results:
    if i == True:
        correct += 1

print("1st Dataset: Car.csv:")
print("There were " + str(correct) + " correct estimates out of " + str(len(results)) +
      " for " + str(100 * round(correct / len(results), 2)) + "% accuracy\n")
"""
SECOND DATASET
: File: auto-mpg.data
: mpg_data
"""
# --- Dataset 2: auto-mpg.data — KNN regression of fuel economy ---
mpg_data = pd.read_csv('auto-mpg.data', header=None,
                       delim_whitespace=True, na_values=["?"])
mpg_data.columns = ["mpg","cylinders","displacement","horsepower","weight",
                    "acceleration","modelyear","origin","carname"]
# Replace unknown horsepower values ("?") with the column mean.
mean = mpg_data['horsepower'].mean()
mpg_data.horsepower = mpg_data.horsepower.fillna(mean)
mpg_target = mpg_data[['mpg']]
mpg_data = mpg_data[['cylinders','displacement','horsepower','weight',
                     'acceleration','modelyear','origin']]
# Convert to numpy arrays (target flattened to 1-D).
mpg_target = mpg_target.values.ravel()
mpg_data = mpg_data.values
# 70/30 train/test split.
data_train, data_test, target_train, target_test = train_test_split(
    mpg_data, mpg_target, test_size=0.30)
regr = KNeighborsRegressor(n_neighbors=5)
regr.fit(data_train, target_train)
predictions = regr.predict(data_test)
# NOTE: exact equality is a poor accuracy metric for continuous regression
# output — predictions almost never match targets exactly, hence the
# original "terribly inaccurate" observation.
results = (predictions == target_test)
correct = int(results.sum())
print("Second Dataset: auto-mpg.data:")
print("There were " + str(correct) + " correct estimates out of " + str(len(results)) +
      " for " + str(100 * round(correct / len(results), 2)) + "% accuracy\n")
"""
THIRD DATASET
: File: student-mat.csv
: student_data
"""
# --- Dataset 3: student-mat.csv — KNN regression of final grade G3 ---
student_data = pd.read_csv('student-mat.csv', sep=";")
# Map the two-option string columns to 0/1.
cleanup = {"school": {"GP":0, "MS":1}, "sex": {"M":0, "F":1},
           "address": {"U":0, "R":1}, "famsize": {"GT3":0, "LE3":1},
           "Pstatus": {"A":0, "T":1}, "schoolsup": {"yes":0, "no":1},
           "famsup": {"yes":0, "no":1}, "paid": {"yes":0, "no":1},
           "activities": {"yes":0, "no":1}, "nursery": {"yes":0, "no":1},
           "higher": {"yes":0, "no":1}, "internet": {"yes":0, "no":1},
           "romantic": {"yes":0, "no":1}
           }
student_data.replace(cleanup, inplace=True)
# One-hot encode the remaining multi-valued categorical columns.
student_data = pd.get_dummies(student_data, columns=['Mjob', 'Fjob',
                                                    'reason', 'guardian'])
student_target = student_data[['G3']]
student_data = student_data.drop(columns=['G3'])
# Convert to numpy arrays.
student_target = student_target.values.ravel()
student_data = student_data.values
# 70/30 train/test split.
data_train, data_test, target_train, target_test = train_test_split(
    student_data, student_target, test_size=0.30)
regr = KNeighborsRegressor(n_neighbors=5)
regr.fit(data_train, target_train)
predictions = regr.predict(data_test)
# BUG FIX: `results` was never recomputed here, so the accuracy printed below
# was silently reusing the SECOND dataset's results. Recompute from this
# dataset's predictions.
results = (predictions == target_test)
correct = int(results.sum())
print("Third Dataset: student-mat.csv:")
print("There were " + str(correct) + " correct estimates out of " + str(len(results)) +
      " for " + str(100 * round(correct / len(results), 2)) + "% accuracy\n")
|
import tensorflow as tf
from tensorflow.keras.layers import Dense, Input, Conv1D, LSTM, Reshape, Flatten, GRU,SimpleRNN
import tensorflow.keras.backend as K
from tensorflow.keras import Model
from tensorflow.keras.optimizers import Adam
import numpy as np
# Fix the TF1 graph-level random seed for reproducibility.
tf.set_random_seed(2212)

class Actor:
    """Policy network for an actor-critic agent (TF1 graph/session style).

    Builds an LSTM + Dense softmax policy and a manual policy-gradient
    update: the objective is -log pi(a|s) with per-action gradients scaled
    by the advantages fed at train() time.
    """
    def __init__(self, sess, action_dim, observation_dim):
        self.action_dim, self.observation_dim = action_dim, observation_dim
        K.set_session(sess)
        self.sess = sess
        self.state_input, self.output, self.model = self.create_model()
        # Advantage values, one per action column; fed in train().
        self.advantages = tf.placeholder(tf.float32, shape=[None, action_dim])
        model_weights = self.model.trainable_weights
        # NOTE(review): 10e-10 equals 1e-9 — presumably a small epsilon to
        # avoid log(0); confirm whether 1e-10 was intended.
        log_prob = tf.math.log(self.output + 10e-10)
        neg_log_prob = tf.multiply(log_prob, -1)
        # Third argument (grad_ys) weights each output's gradient by its advantage.
        actor_gradients = tf.gradients(neg_log_prob, model_weights, self.advantages)
        grads = zip(actor_gradients, model_weights)
        self.optimize = tf.train.AdamOptimizer(0.001).apply_gradients(grads)

    def create_model(self):
        """Build the Keras policy model; return (input tensor, output tensor, model)."""
        state_input = Input(shape=self.observation_dim)
        # Add a leading axis so the LSTM receives rank-3 (batch, time, features) input.
        rnn_in = tf.expand_dims(state_input, [0])
        lstm = LSTM(24,activation='relu')(rnn_in)
        state_h1 = Dense(48, activation='relu')(lstm)
        state_h2 = Dense(48, activation='relu')(state_h1)
        output = Dense(self.action_dim, activation='softmax')(state_h2)
        model = Model(inputs=state_input, outputs=output)
        adam = Adam(lr=0.001)
        # compile() is required by Keras even though optimisation actually runs
        # through the manual gradient op built in __init__.
        model.compile(loss='categorical_crossentropy', optimizer=adam)
        return state_input, output, model

    def train(self, X, y):
        """Run one policy-gradient step: X = batch of states, y = advantages."""
        self.sess.run(self.optimize, feed_dict={self.state_input:X, self.advantages:y})
|
def solution(n, money):
    """Count the number of ways to make amount `n` from coin values `money`.

    Classic unbounded coin-change DP: processing one coin at a time keeps
    combinations (not permutations) counted once.

    Fixes vs. the original: dp[0] = 1 serves as the base case, which removes
    the redundant `i == m` branch and the `m > n` skip (an empty range handles
    it), and makes solution(0, ...) correctly return 1 (the empty selection)
    instead of 0.
    """
    # ways[i] = number of ways to form amount i using the coins seen so far.
    ways = [1] + [0] * n
    for coin in money:
        # range is empty when coin > n, so oversized coins are skipped naturally.
        for amount in range(coin, n + 1):
            ways[amount] += ways[amount - coin]
    return ways[n]
#!/usr/bin/env python
import os
import json
import sys
from elasticsearch import Elasticsearch
# Bulk-index per-day JSON files of parliament speeches ("wystapienia") into
# a local Elasticsearch instance under index 'sejmngram'.
# NOTE: Python 2 print statements — this script will not run under Python 3.
es = Elasticsearch()

# Require exactly one argument: the directory to walk for .json files.
if not (len(sys.argv) == 2):
    print "USAGE:", sys.argv[0], "<path to dir with .json files>"
    sys.exit(1)

for base, subdirs, files in os.walk(sys.argv[1]):
    for name in files:
        if name.endswith('.json'):
            path = base + '/' + name
            with open(path, 'r') as fp:
                # Each file holds a list of speech objects for one day.
                dzien_wystapien_json = json.load(fp)
                for wystapienie in dzien_wystapien_json:
                    # The document's own 'id' field becomes the ES document id,
                    # so re-running the script overwrites rather than duplicates.
                    es.index(index='sejmngram', doc_type='wystapienie', id=wystapienie['id'], body=wystapienie)
                    print 'Processed wystapienie id:', wystapienie['id']
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 4 11:53:10 2020
TF-functions copied from https://github.com/Hvass-Labs/TensorFlow-Tutorials/blob/master/14_DeepDream.ipynb
"""
import PIL as PIL
import numpy as np
import tensorflow as tf
import utils as utils
import tensorflow.compat.v1 as tfc
import math
import time as time
# Model file + download location for the Inception5h graph.
model_fn = "tensorflow_inception_graph.pb"
inception_download_url="http://storage.googleapis.com/download.tensorflow.org/models/inception5h.zip"
save_folder="./renderedImages/4"
### Activate GPU rendering if possible
device_name='CPU:0'
if len(tf.config.experimental.list_physical_devices('GPU'))>0:
    device_name='GPU:0'
with tfc.device(device_name):
    # TF1-style graph + interactive session used by the helpers below.
    graph = tfc.Graph()
    sess = tfc.InteractiveSession(graph=graph)
# Load model and build graph
print('Ignore the following Gfile-warning:')
with tfc.gfile.FastGFile(model_fn, 'rb') as f, tfc.device(device_name):
    # NOTE(review): the download runs AFTER model_fn has been opened, so this
    # appears to assume the .pb already exists locally — confirm.
    utils.maybe_download_and_extract(inception_download_url, ".")
    graph_def = tfc.GraphDef()
    graph_def.ParseFromString(f.read())
    # Network input placeholder; mean-centred before being fed to the graph.
    t_input = tfc.placeholder(np.float32, name = 'input')
    # default 117.0 (ImageNet channel mean used by Inception5h)
    imagenet_mean = 117.0
    t_preprocessed = tf.expand_dims(t_input-imagenet_mean, 0)
    tfc.import_graph_def(graph_def, {'input':t_preprocessed})
def T(layer):
    '''Helper for getting a layer's output tensor from the imported Inception graph.'''
    return graph.get_tensor_by_name("import/%s:0" % layer)
def tffunc(*argtypes):
    '''Helper that transforms a TF-graph generating function into a regular one.

    argtypes are the placeholder dtypes for the wrapped function's positional
    arguments. See "resize" function below.
    '''
    placeholders = list(map(tfc.placeholder, argtypes))
    def wrap(f):
        # Build the graph once against the placeholders...
        out = f(*placeholders)
        def wrapper(*args, **kw):
            # ...then each call just feeds concrete values and evaluates.
            return out.eval(dict(zip(placeholders, args)), session=kw.get('session'))
        return wrapper
    return wrap
# Helper function that uses TF to resize an image (bilinear interpolation).
def resize(img, size):
    img = tf.expand_dims(img, 0)  # add a batch dimension for resize_bilinear
    return tfc.image.resize_bilinear(img, size)[0,:,:,:]
# Turn the graph-building function into a regular numpy-in / numpy-out function.
resize = tffunc(np.float32, np.int32)(resize)
def get_tile_size(num_pixels, tile_size=400):
    """Return a tile size close to `tile_size` that covers `num_pixels`
    with a whole number of (near-)equal tiles."""
    tile_count = max(1, int(round(num_pixels / tile_size)))
    return math.ceil(num_pixels / tile_count)
def calc_grad_tiled(img, t_grad, tile_size=550):
    '''Compute the value of tensor t_grad over the image in a tiled way.
    Random shifts are applied to the image to blur tile boundaries over
    multiple iterations.'''
    sz = tile_size
    h, w = img.shape[:2]
    # Random roll so the tile seams land somewhere different on every call.
    sx, sy = np.random.randint(sz, size=2)
    img_shift = np.roll(np.roll(img, sx, 1), sy, 0)
    grad = np.zeros_like(img)
    for y in range(0, max(h-sz//2, sz),sz):
        for x in range(0, max(w-sz//2, sz),sz):
            sub = img_shift[y:y+sz,x:x+sz]
            # Evaluate the gradient tensor on just this tile.
            with tfc.device(device_name):
                g = sess.run(t_grad, {t_input:sub})
            grad[y:y+sz,x:x+sz] = g
    # Undo the roll so the gradient lines up with the input image again.
    return np.roll(np.roll(grad, -sx, 1), -sy, 0)
def set_layer(layer, squared, int1, int2):
    """Build the gradient tensor d(mean activation)/d(input) for channels
    [int1:int2) of `layer`; optionally square the activations first."""
    with tfc.device(device_name):
        if squared:
            t_obj=tfc.square(T(layer)[:,:,:,int1:int2])
        else:
            t_obj=T(layer)[:,:,:,int1:int2]
        t_score = tfc.reduce_mean(t_obj) # defining the optimization objective
        t_grad = tfc.gradients(t_score, t_input)[0]
        return t_grad
def dream_image(image, settings, out_name):
    """Render a DeepDream of `image` via per-renderer, multi-octave gradient ascent.

    `settings` is a dict holding iteration/octave counts plus a list of
    per-renderer configs (layer, channel range, mask, rotation, colour
    correction, tile size, step size). The result — and optionally the
    accumulated gradient — is saved through utils.save_image.
    """
    print("Processing...")
    #### Global settings for every renderer
    img = image
    orig_img=image#maybe clean up
    # Iterations and octaves
    iterations=settings['iterations']
    octave_n=settings['octaves']
    octave_scale=settings['octave_scale']
    iteration_descent=settings['iteration_descent']
    # Additional settings
    save_gradient=settings['save_gradient']
    background_color=settings['background']
    #tf.random.set_seed(settings.tf_seed)#not working?
    # Renderers
    renderers=settings['renderers']
    ### One gradient tensor per renderer (layer + channel range)
    t_obs=[]
    for r in renderers:
        t_obs.append(set_layer(r['layer'], r['squared'], r['f_channel'], r['l_channel']))
    ## g_sum accumulates every gradient ever applied ("background")
    g_sum = np.zeros_like(img)
    # split the image into a number of octaves
    octaves = []
    g_sums = []
    # Prepare image & gradient-accumulator detail layers for every octave
    for i in range(octave_n - 1):
        hw = img.shape[:2]
        lo = resize(img, np.int32(np.float32(hw) / octave_scale))
        hi = img - resize(lo, hw)  # high-frequency detail lost by downscaling
        img = lo
        octaves.append(hi)
        lo = resize(g_sum, np.int32(np.float32(hw) / octave_scale))
        hi = g_sum - resize(lo, hw)
        g_sum = lo
        g_sums.append(hi)
    # generate details octave by octave (smallest scale first)
    for octave in range(octave_n):
        ## Prepare current octave: upscale and re-add the stored detail layer
        if octave > 0:
            hi = octaves[-octave]
            img = resize(img, hi.shape[:2]) + hi
            hi_g =g_sums[-octave]
            g_sum = resize(g_sum, hi.shape[:2]) + hi_g
        ## Per-renderer crop bounds and masks at this octave's resolution
        bounds=utils.get_bounds(img.shape[1], img.shape[0], renderers)
        iteration_masks=[]
        for r in renderers:
            if r['masked']:
                iteration_masks.append(resize(r['mask'], img.shape[:2])/255)#move up, /255 just once
            else:
                iteration_masks.append([])
        orig_img_m=resize(image,img.shape[:2])/255#move up, /255 just once
        #### Iterations (linearly fewer per octave via iteration_descent)
        for iteration in range(iterations-octave*iteration_descent):
            print("Iteration "+str(iteration+1)+" / "+str(iterations-octave*iteration_descent) + " Octave: " +str(octave+1)+" / "+str(octave_n))
            #### Gradient
            gradients=[]
            for i in range(len(renderers)):
                # A renderer may be scheduled to run only every Nth iteration.
                if (iteration+1)%renderers[i]['render_x_iteration']==0:
                    start_time=time.time()
                    ## Pre-gradient preparations
                    # Crop the image to this renderer's bounds
                    t_img=img[bounds[i][2]:bounds[i][3],bounds[i][0]:bounds[i][1]]
                    # Rotate if requested
                    if renderers[i]['rotate']:
                        t_img=np.rot90(t_img, renderers[i]['rotation'])
                    ## Get the gradient
                    g=calc_grad_tiled(t_img,
                                      t_obs[i],
                                      tile_size=renderers[i]['tile_size'])
                    # Normalise the step by mean |g|; epsilon avoids div-by-zero.
                    g=g * (renderers[i]['step_size'] / (np.abs(g).mean() + 1e-7))
                    ##
                    ## Gradient manipulations:
                    ##
                    # Rotate back if necessary
                    if renderers[i]['rotate']:
                        g=np.rot90(g, 4-renderers[i]['rotation'])
                    # Masking the gradient
                    if renderers[i]['masked']:
                        g*=iteration_masks[i][bounds[i][2]:bounds[i][3],bounds[i][0]:bounds[i][1]]
                    # Color correction
                    if renderers[i]['color_correction']:
                        g=utils.gradient_grading(g, orig_img_m[bounds[i][2]:bounds[i][3],bounds[i][0]:bounds[i][1]],
                                                 method=renderers[i]['cc_vars'][0],
                                                 fr=renderers[i]['cc_vars'][1],
                                                 fg=renderers[i]['cc_vars'][2],
                                                 fb=renderers[i]['cc_vars'][3])
                    ## Adding the finalized gradient to the list
                    gradients.append(g)
                    print('Finished computing Gradient for Renderer {} in {}s'.format(i, time.time()-start_time))
                else:
                    # Renderer idle this iteration: contribute a zero gradient.
                    gradients.append(np.zeros_like(img))
            # Apply every renderer's gradient inside its own bounds.
            for i in range(len(bounds)):
                img[bounds[i][2]:bounds[i][3],bounds[i][0]:bounds[i][1]] += gradients[i]
                g_sum[bounds[i][2]:bounds[i][3],bounds[i][0]:bounds[i][1]]+= gradients[i]
    #### Save image
    if settings['color_correction']:
        # Re-apply the whole accumulated gradient, colour-graded, onto the original.
        img=image+utils.gradient_grading(g_sum, image/255,
                                         method=settings['cc_vars'][0],
                                         fr=settings['cc_vars'][1],
                                         fg=settings['cc_vars'][2],
                                         fb=settings['cc_vars'][3])
    g_sum[:,:]+=background_color
    utils.save_image(img, out_name)
    if save_gradient:
        utils.save_image(g_sum, 'gradient_'+out_name)
|
# -*- coding: utf-8 -*-
import scrapy
import redis
from scrapy_splash import SplashRequest
from standalone.items import StandaloneItem
from scrapy.spiders import CrawlSpider
from urllib.parse import urlparse
from scrapy.linkextractors.lxmlhtml import LxmlLinkExtractor
import re
hostip='127.0.0.1'
job_redis = redis.Redis(host=hostip)
class StandaloneSpiderSpider(CrawlSpider):
    """Splash-rendered crawler over the domains listed in list.txt.

    Caps the number of downloaded pages per site (tracked in Redis) and
    records failed/erroring URLs in the Redis set 'error'.
    """
    name = 'standalone_spider'
    # Seed URLs, one per line. Use a context manager so the file handle is
    # closed instead of leaked (the original left `open(...)` unclosed).
    with open('list.txt', 'r') as _seed_file:
        data = _seed_file.readlines()
    allow_domains = ['{uri.netloc}'.format(uri=urlparse(i)).strip() for i in data]
    start_urls = [domain.strip() for domain in data]
    maximumPagesPerSite = 19
    # Splash HTTP basic-auth credentials.
    http_user = 'user'
    http_pass = 'userpass'

    # Using Splash to handle requests
    def start_requests(self):
        # Route every seed URL through Splash so JavaScript gets rendered.
        for url in self.start_urls:
            splashrequest = SplashRequest(url, self.parse, endpoint='render.html', headers={'User-Agent': "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36"}, args={'wait':0.5,'allowed_domains':self.allow_domains},)
            splashrequest.errback = self.errback
            yield splashrequest

    def parse(self, response):
        parsed_uri = urlparse(response.url)
        domainurl = '{uri.netloc}'.format(uri=parsed_uri)
        # If the amount of downloaded pages of one site exceeds the limit, all
        # following requests of the same domain are removed from the queue.
        if int(job_redis.hlen(domainurl)) > self.maximumPagesPerSite:
            # BUG FIXES: the trailing '\b' was a non-raw literal (a backspace
            # character, not a word boundary); the domain's dots were unescaped
            # regex metacharacters; and len(filter(...)) raises on Python 3's
            # lazy filter. Escape the domain and materialise the matches.
            regex = re.compile(r'\b' + re.escape(domainurl) + r'\b')
            matches = [u for u in self.start_urls if regex.search(u)]
            for item in matches:
                self.start_urls.remove(item)
            return
        # Queue more same-site links; skip anchors, tel:/fax:/mailto: links,
        # login pages and bare phone numbers.
        for link in LxmlLinkExtractor(deny=[r'[\S\s]*#[\S\s]*',r'[\S\s]*\/tel:[\S\s]*',r'[\S\s]*\/fax:[\S\s]*',r'[\S\s]*\/mailto:[\S\s]*',r'[\S\s]*\/login[\S\s]*',r'[\S\s]*\/\+[0-9]*$'],allow_domains=self.allow_domains).extract_links(response):
            if int(job_redis.hlen(domainurl)) > self.maximumPagesPerSite:
                break
            else:
                self.start_urls.append(link.url)
        # Add sites responding with codes 400-599 to the error set.
        if response.status in range(400, 600):
            job_redis.sadd('error', response.url)
        else:
            item = StandaloneItem()
            tempinput = response.xpath("//body")
            # Extract the domain, title, text and url of a website.
            if tempinput:
                templist = []
                # Collapse all whitespace runs in the body markup to single spaces.
                templist.append(re.sub(r'\s+', ' ', tempinput.extract()[0].strip()))
                item['domain'] = [domainurl]
                item['data'] = templist
                item['title'] = response.xpath("normalize-space(//title)").extract()
                item['link'] = [response.url]
                return item
            else:
                job_redis.sadd('error', response.url)

    # Error callback for Splash
    def errback(self, failure):
        # Remember URLs whose render/request failed.
        if hasattr(failure, 'response'):
            responseurl = failure.value.response.url
            job_redis.sadd('error', responseurl)
|
# Menu sections mapped to the items available in each section.
menu = {
    'Appetizers': ['Wings', 'Cookies', 'Spring Rolls'],
    'Entrees': ['Salmon', 'Steak', 'Meat Tornado', 'A Literal Garden'],
    'Desserts': ['Ice Cream', 'Cake', 'Pie'],
    'Drinks': ['Coffee', 'Tea', 'Unicorn Tears'],
}
def print_welcome():
    """Print the cafe's boxed welcome banner."""
    border = '*' * 38
    rows = [
        border,
        '**    Welcome to the Snakes Cafe!   **',
        '**    Please see our menu below.    **',
        '**' + ' ' * 36,
        '** To quit at any time, type "quit" **',
        border,
        '',  # the original banner string ended with a newline
    ]
    print('\n'.join(rows))
def print_order():
    """Print the boxed order prompt."""
    border = '*' * 35
    print(border + '\n** What would you like to order? **\n' + border)
def print_menu():
    """Print each menu section: heading, divider, items, blank separator."""
    for section, items in menu.items():
        print('\n'.join([section, '-' * 8] + items))
        print('\n')
def get_menu():
    """Return a flat list of every item on the menu, in menu order."""
    return [item for items in menu.values() for item in items]
def take_order():
    """Read item names from stdin until 'quit', tallying a running order."""
    available = get_menu()
    tally = {}
    choice = input()
    while choice != 'quit':
        if choice in available:
            tally[choice] = tally.get(choice, 0) + 1
            print(f'** {tally[choice]} of {choice} have been added to your meal **')
        else:
            print("Please only order from the menu!")
        choice = input()
# Script entry: greet, show the menu, prompt, then read orders until "quit".
print_welcome()
print_menu()
print_order()
take_order()
|
import FWCore.ParameterSet.Config as cms
# Configuration for Process "PROD2": reads testGetBy1.root, produces a few
# int products, and writes testGetBy2.root.
process = cms.Process("PROD2")

# Tracer service: dump call context for the 'intProducer' module and for
# non-module transitions.
process.Tracer = cms.Service('Tracer',
    dumpContextForLabels = cms.untracked.vstring('intProducer'),
    dumpNonModuleContext = cms.untracked.bool(True)
)

# Route Tracer messages to cout unlimited; suppress everything else.
process.MessageLogger = cms.Service("MessageLogger",
    cout = cms.untracked.PSet(
        Tracer = cms.untracked.PSet(
            limit = cms.untracked.int32(100000000)
        ),
        default = cms.untracked.PSet(
            limit = cms.untracked.int32(0)
        ),
        enable = cms.untracked.bool(True)
    )
)

# Force fully serial processing: one stream, one concurrent run/lumi block.
process.options = cms.untracked.PSet(
    numberOfStreams = cms.untracked.uint32(1),
    numberOfConcurrentRuns = cms.untracked.uint32(1),
    numberOfConcurrentLuminosityBlocks = cms.untracked.uint32(1)
)

process.source = cms.Source("PoolSource",
    fileNames = cms.untracked.vstring(
        'file:testGetBy1.root'
    )
)

process.out = cms.OutputModule("PoolOutputModule",
    fileName = cms.untracked.string('testGetBy2.root')
)

process.intProducer = cms.EDProducer("IntProducer", ivalue = cms.int32(2))
process.intProducerU = cms.EDProducer("IntProducer", ivalue = cms.int32(20))
process.intVectorProducer = cms.EDProducer("IntVectorProducer",
    count = cms.int32(9),
    ivalue = cms.int32(21)
)

# The Task holds unscheduled producers; the Path schedules intProducer.
process.t = cms.Task(process.intProducerU, process.intVectorProducer)
process.p = cms.Path(process.intProducer, process.t)
process.e = cms.EndPath(process.out)
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.spiders import Spider
import re
from scrapy import Request
from scrapy.pipelines.images import ImagesPipeline
from scrapy.exceptions import DropItem
from scrapy import Request
from scrapy import log
import os
from shutil import move
class ExhentaiPipeline(object):
    """Default no-op item pipeline: passes every item through unchanged."""
    def process_item(self, item, spider):
        return item
class ExhentaiDownloadPipeline(ImagesPipeline):
    """Download gallery images (with a logged-in session) and move each file
    into the per-title directory recorded on the item."""

    # SECURITY: hard-coded session cookies / credentials committed to source.
    # These should be moved to project settings or environment variables and
    # the exposed values rotated.
    cookies = {
        "igneous":"942c9e50d",
        "ipb_member_id":"2950929",
        "ipb_pass_hash":"a8f176f10adc6fd2273d1f6a40804aba",
        "lv":"1522650532-1523844928",
        "s":"7a35705f2",
        "sk":"nfirszmhxpxzezi6t522ahv6n76t"
    }

    def get_media_requests(self, item, info):
        # item['image_urls'] holds a single URL string here (not a list),
        # so one request per item is issued.
        image_url = item['image_urls']
        yield Request(image_url, cookies=self.cookies)

    def item_completed(self, results, item, info):
        # Collect the storage paths of successfully downloaded images.
        downloaded = [x['path'] for ok, x in results if ok]
        if not downloaded:
            # BUG FIX: the original indexed [0] unconditionally and raised
            # IndexError when the download failed; drop the item explicitly.
            raise DropItem("Image download failed for %s" % item.get('image_urls'))
        old_image_paths = os.path.join("exhentai", "media", downloaded[0])
        new_image_paths = item['image_paths']
        # Ensure the per-title destination directory exists before moving.
        if not os.path.isdir(item["image_title"]):
            os.mkdir(item["image_title"])
        move(os.path.abspath(old_image_paths), new_image_paths)
        return item
|
# Print the 1..10 multiplication table for a user-supplied number.
n = int(input('Podaj liczbę od 1 do 10: '))
for factor in range(1, 11):
    print(n, 'x', factor, '=', n * factor)
|
# Generated by Django 2.0.5 on 2018-05-21 20:51
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional free-text 'desc' field to the Tile model."""

    dependencies = [
        ('room', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='tile',
            name='desc',
            # blank=True: the field may be left empty in forms/admin.
            field=models.TextField(blank=True),
        ),
    ]
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.