| content (stringlengths 0–1.05M) | origin (stringclasses 2 values) | type (stringclasses 2 values) |
|---|---|---|
#!/usr/bin/python
#
# Example of a complex
# graph representing a network
#
import gvgen
# Creates the new graph instance
graph = gvgen.GvGen(None, "overlap=\"scale\";\nlabelfloat=\"true\";\nsplines=\"true\";")
# We define different styles
graph.styleAppend("router", "shapefile", "router.png")
graph.styleAppend("router", "color", "white")
graph.styleAppend("router", "label", "")
# Creates items
insidenet = graph.newItem("Inside network")
internet = graph.newItem("Internet")
win1 = graph.newItem("Windows", insidenet)
win2 = graph.newItem("Windows", insidenet)
linux = graph.newItem("Linux", insidenet)
hurd = graph.newItem("GNU/Hurd", insidenet)
sun = graph.newItem("Sun", internet)
router = graph.newItem("Router")
# Time to apply styles and set some properties
graph.styleApply("router", router)
graph.propertyAppend(win1, "shapefile", "wingdows.png")
graph.propertyAppend(win2, "shapefile", "wingdows.png")
graph.propertyAppend(linux, "shapefile", "linux.png")
graph.propertyAppend(hurd, "shapefile", "hurd.png")
graph.propertyAppend(sun, "shapefile", "sun.png")
graph.propertyAppend(win1, "label", "")
graph.propertyAppend(win2, "label", "")
graph.propertyAppend(linux, "label", "")
graph.propertyAppend(hurd, "label", "")
graph.propertyAppend(sun, "label", "")
# Links from "foo" to "bar"
graph.newLink(win1, router)
graph.newLink(win2, router)
graph.newLink(linux, router)
graph.newLink(hurd, router)
graph.newLink(router, sun)
# Outputs the graphviz code
graph.dot()
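# Illustrative (assumed workflow, not part of the original script): graph.dot()
# prints the generated DOT source to stdout, so it can be piped through
# Graphviz, e.g. (the script filename is a placeholder):
#   python network.py | neato -Tpng -o network.png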
|
nilq/baby-python
|
python
|
"""
Functions related to calculating the rotational energy of asymmetric
molecules. Townes and Schawlow, Ch. 4
"""
from pylab import (poly1d, linspace, zeros, plot, text, axis, legend,
                   title, xlabel, ylabel, rcParams, show)
def asymmetry(A, B, C):
"""
Ray's asymmetry parameter for molecular rotation.
For a prolate symmetric top (B = C), kappa = -1.
For an oblate symmetric top (B = A), kappa = +1.
See Townes and Schawlow, Ch. 4.
"""
return (2.*B - A - C)/(A - C)
def b_prolate(kappa):
"""
    b_P runs from 0 (prolate, kappa = -1) to -1 (oblate, kappa = +1).
Townes and Schawlow, Ch. 4
"""
return (kappa+1.)/(kappa-3.)
def b_oblate(kappa):
"""
    b_O runs from -1 (prolate, kappa = -1) to 0 (oblate, kappa = +1).
Townes and Schawlow, Ch. 4
"""
return (kappa-1.)/(kappa+3.)
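# Illustrative check (not in the original source): the two reduced parameters
# satisfy b_prolate(kappa) == b_oblate(-kappa). For kappa = -0.9:
#   b_prolate(-0.9) = 0.1/(-3.9) ~ -0.0256
#   b_oblate(0.9)   = -0.1/3.9   ~ -0.0256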
def asym_quantum_factor(J,b):
"""
    This takes the place of K^2 in calculating the energy levels
for asymmetric rotators. Townes and Schawlow, Ch. 4. For
J > 6 this returns an empty tuple. Note that it doesn't matter which version of
b is used since b_prolate(kappa) = b_oblate(-kappa) and the equations are
symmetric in b or depend on b**2.
"""
roots = ()
if J == 0:
roots = (0,)
elif J == 1:
roots = (0., 1+b, 1-b)
elif J == 2:
roots = ( 4., 1-3*b, 1+3*b)
p = poly1d([1, -4, -12*b**2])
roots = roots + tuple(p.r)
elif J == 3:
roots = (4.,)
p = poly1d([1, -4, -60*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -10+6*b, 9-54*b-15*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -10-6*b, 9+54*b-15*b**2])
roots = roots + tuple(p.r)
elif J == 4:
p = poly1d([1, -10*(1-b), 9-90*b-63*b**2])
roots = tuple(p.r)
p = poly1d([1, -10*(1+b), 9+90*b-63*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -20, 64-28*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -20, 64-208*b**2, 2880*b**2])
roots = roots + tuple(p.r)
elif J == 5:
p = poly1d([1, -20, 64-108*b**2])
roots = tuple(p.r)
p = poly1d([1, -20, 64-528*b**2,6720*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -35+15*b, 259-510*b-213*b**2, -225+3375*b+4245*b**2-675*b**3])
roots = roots + tuple(p.r)
p = poly1d([1, -35-15*b, 259+510*b-213*b**2, -225-3375*b+4245*b**2+675*b**3])
roots = roots + tuple(p.r)
elif J == 6:
p = poly1d([1, -35+21*b, 259-714*b-525*b**2, -225+4725*b+9165*b**2-3465*b**3])
roots = tuple(p.r)
p = poly1d([1, -35-21*b, 259+714*b-525*b**2, -225-4725*b+9165*b**2+3465*b**3])
roots = roots + tuple(p.r)
p = poly1d([1, -56, 784-336*b**2, -2304+9984*b**2])
roots = roots + tuple(p.r)
p = poly1d([1, -56, 784-1176*b**2, -2304+53664*b**2, -483840*b**2+55440*b**4])
roots = roots + tuple(p.r)
else:
roots = ()
return roots
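# Illustrative limit check (added here, consistent with the docstring above):
# for b = 0 the rotor is a symmetric top and the w's reduce to K**2, e.g.
# asym_quantum_factor(1, 0.) returns (0., 1., 1.), i.e. K**2 for K = 0, -1, +1,
# so Ejk below reproduces E = B*J*(J+1) + (A - B)*K**2 when B == C.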
def Ejk(A,B,C,J):
"""
Rotational energy of an asymmetric molecule using Eq. 4-4 and 4-5 in
    Townes and Schawlow. Returns energies in the units used for A, B and C.
"""
kappa = asymmetry(A,B,C)
# assume prolate form
b_P = b_prolate(kappa)
ws = asym_quantum_factor(J,b_P)
# print "w's=",ws
result = []
for w in ws:
result.append( (B+C)*J*(J+1)/2. + (A - (B+C)/2. )*w )
return result
def plot_E_vs_kappa(Amax,C,maxJ):
"""
Plots diagram showing how energy of an asymmetric rotor depends on its
asymmetry as it varies between prolate and oblate, and how the J(-K,+K)
labelling arises. Townes and Schawlow , Ch. 4.
This assumes that the volume of the moment of inertia ellipsoid is a
constant::
Ia*Ib*Ic = 3*V/(2*pi)
or::
(h/8 pi**2)**3(ABC) = 3*V/(2*pi)
or::
A*B*C = K, a constant
The ellipsoid's minimum semi-axis C is also a constant. So in the prolate case,
B=C and K = Amax*C*C. In the oblate case, Amin=B and K = A*A*C.
The constraints are then::
2*B = (1+kappa)*A + (1-kappa)*C
and::
A = Amax*C/B
"""
n_kappas = 21
kappas = linspace(-1,1,n_kappas)
for J in range(maxJ):
n_columns = 2*J+1
        # create a matrix of n_kappas rows and n_columns columns
E = zeros((n_kappas,n_columns),float)
for i in range(n_kappas):
kappa = kappas[i]
p = poly1d([2,(kappa-1)*C,-(kappa+1)*Amax*C])
if p.r[0] > 0.:
B = p.r[0]
else:
B = p.r[1]
            # print B
A = Amax*C/B
# This should yield n_columns of energy values for this kappa
Es = Ejk(A,B,C,J)
E[i,:] = Es
# Now we have a 2D array of energies to plot
for k in range(n_columns):
# select a line style and plot
            if J%4 == 0:
                ls = "-"
            elif J%4 == 1:
                ls = "--"
            elif J%4 == 2:
                ls = ":"
            else:
                ls = "-."
plot(kappas,E[:,k],label=r"$J_{\tau}="+str(J)+"_{"+str(k-J)+"}$",ls=ls,lw=2)
# label the lines
for K in range(J+1):
# For prolate, B=C
E_prolate = C*J*(J+1)+(Amax-C)*K**2
# For oblate, B=A, using the last value of A
E_oblate = A*J*(J+1)+(C-A)*K**2
text(-0.98+0.07*(J%2),E_prolate,r"$"+str(J)+"_{"+str(K)+"}$")
text( 0.93-0.07*(J%2),E_oblate,r"$"+str(J)+"_{"+str(K)+"}$")
def test(A,B,C,maxJ):
"""
Checks the calculation of energy levels. For example, for water::
>>> test(835.83910,435.347353,278.139826,5)
0 [0.0]
1 [713.48717899999997, 1113.978926, 1271.186453]
2 [4056.8435790000003, 2855.3683380000002, 2383.7457570000001,
4094.7813912145548, 2102.5237247854457]
3 [6197.3051160000005, 6374.3863667070382, 4103.8418232929625,
8620.1335561107444, 5204.2902778892558, 8614.2071531433267, 4266.9715188566752]
4 [11569.54077149916, 8277.1955485008384, 11529.522215260266, 6745.1388347397333,
14830.335414585266, 9021.318375414734, 14831.098979756451, 9490.7431163792135,
6664.6834838643363]
    Divide by 29.979 to convert GHz to 1/cm
"""
for J in range(maxJ):
print J,Ejk(A,B,C,J)
if __name__ == "__main__":
    C=25
    A=125
    maxJ = 4
    plot_E_vs_kappa(A,C,maxJ)
a = axis()
b = [a[0], a[1], -10, a[3]]
axis(b)
rcParams.update({'legend.fontsize': 10})
legend(loc=9)
title(r"$\mathrm{Asymmetric~rotor,~A}_{\mathrm{max}}=$"+str(A)+"$,~\mathrm{C}=$"+str(C))
xlabel(r"$\mathrm{Asymmetry}$")
ylabel(r'$E(J,\tau)/h~(GHz)$')
show()
|
nilq/baby-python
|
python
|
import matplotlib.pyplot as plt
import numpy as np
import torch
# from scipy.special import softmax
# from mpl_toolkits.axes_grid1 import make_axes_locatable
def compare_distogram(outputs, targets):
plt.figure(num=1, figsize=[15, 10])
plt.clf()
# names = ['Distance','Omega','Phi','Theta']
names = ['dNN','dCaCa','dCbCb','dNCa','dNCb','dCaCb']
n = len(targets)
for i,(output,target,name) in enumerate(zip(outputs,targets,names)):
if isinstance(output, torch.Tensor):
output = torch.squeeze(output[-1,:,:]).cpu().detach().numpy()
if isinstance(target, torch.Tensor):
target = torch.squeeze(target[-1,:,:]).cpu().detach().numpy()
mask = target > 0
        plt.subplot(n, 3, i*3 + 1)
plt.imshow(output, vmin=0)
plt.colorbar()
tit = name + "(prediction)"
plt.title(tit)
        plt.subplot(n, 3, i*3 + 2)
plt.imshow(target, vmin=0)
plt.colorbar()
tit = name + "(target)"
plt.title(tit)
        plt.subplot(n, 3, i*3 + 3)
plt.imshow(np.abs(mask * output - target), vmin=0)
plt.colorbar()
tit = name + "(diff)"
plt.title(tit)
plt.pause(0.5)
return
def plotfullprotein(p1, p2, p3, t1, t2, t3):
    # Target coordinates (t*) are drawn in red, predictions (p*) in blue.
    plt.figure(num=2, figsize=[15, 10])
    plt.clf()
    n = t1.shape[1]
    axes = plt.axes(projection='3d')
    axes.set_xlabel("x")
    axes.set_ylabel("y")
    axes.set_zlabel("z")

    def connect(main, other, color, marker):
        # Draw one segment per residue between matching columns of two 3xN arrays.
        for i in range(n):
            axes.plot3D([main[0, i], other[0, i]],
                        [main[1, i], other[1, i]],
                        [main[2, i], other[2, i]], color, marker=marker)

    for main, second, third, color in ((t1, t2, t3, 'red'), (p1, p2, p3, 'blue')):
        axes.plot3D(main[0, :], main[1, :], main[2, :], color, marker='x')
        connect(main, second, color, 'd')
        connect(main, third, color, 'o')
    plt.pause(0.5)
    return
def plotcoordinates(pred,target):
plt.figure(num=1, figsize=[15, 10])
plt.clf()
axes = plt.axes(projection='3d')
axes.set_xlabel("x")
axes.set_ylabel("y")
axes.set_zlabel("z")
line = axes.plot3D(pred[0,:],pred[1,:], pred[2,:], 'green', marker='x')
line2 = axes.plot3D(target[0,:],target[1,:], target[2,:], 'red', marker='x')
plt.pause(2.5)
return
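# Illustrative smoke test (my assumption: coordinates are laid out as 3xN arrays):
#   import numpy as np
#   xyz = np.random.rand(3, 10)
#   plotcoordinates(xyz, xyz + 0.05 * np.random.rand(3, 10))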
|
nilq/baby-python
|
python
|
import functools
import requests
import suds.transport as transport
import traceback
try:
import cStringIO as StringIO
except ImportError:
import StringIO
__all__ = ['RequestsTransport']
def handle_errors(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
try:
return f(*args, **kwargs)
except requests.HTTPError as e:
buf = StringIO.StringIO(e.response.content)
raise transport.TransportError(
'Error in requests\n' + traceback.format_exc(),
e.response.status_code,
buf,
)
except requests.RequestException:
raise transport.TransportError(
'Error in requests\n' + traceback.format_exc(),
                0,
)
return wrapper
class RequestsTransport(transport.Transport):
def __init__(self, session=None):
transport.Transport.__init__(self)
self._session = session or requests.Session()
@handle_errors
def open(self, request):
resp = self._session.get(request.url)
resp.raise_for_status()
return StringIO.StringIO(resp.content)
@handle_errors
def send(self, request):
resp = self._session.post(
request.url,
data=request.message,
headers=request.headers,
)
if resp.headers.get('content-type') not in ('text/xml',
'application/soap+xml'):
resp.raise_for_status()
return transport.Reply(
resp.status_code,
resp.headers,
resp.content,
)
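# Illustrative usage (assuming a reachable WSDL; the URL is a placeholder):
# plug the transport into a suds client so all HTTP goes through one
# requests.Session.
#   from suds.client import Client
#   client = Client('http://example.com/service?wsdl',
#                   transport=RequestsTransport())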
|
nilq/baby-python
|
python
|
'''
@author: Frank
'''
import zstacklib.utils.http as http
import zstacklib.utils.log as log
import zstacklib.utils.plugin as plugin
import zstacklib.utils.jsonobject as jsonobject
import zstacklib.utils.daemon as daemon
import zstacklib.utils.iptables as iptables
import os.path
import functools
import traceback
import pprint
logger = log.get_logger(__name__)
TESTAGENT_PORT = 9393
class TestAgentError(Exception):
''' test agent failed '''
class TestAgent(plugin.Plugin):
pass
class TestAgentServer(object):
http_server = http.HttpServer(port=TESTAGENT_PORT)
http_server.logfile_path = log.get_logfile_path()
def __init__(self):
self.plugin_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'plugins')
self.plugin_rgty = plugin.PluginRegistry(self.plugin_path)
def start(self, in_thread=True):
self.plugin_rgty.configure_plugins({})
self.plugin_rgty.start_plugins()
if in_thread:
self.http_server.start_in_thread()
else:
self.http_server.start()
def stop(self):
self.plugin_rgty.stop_plugins()
self.http_server.stop()
class AgentResponse(object):
def __init__(self, success=True, error=None):
self.success = success
self.error = error if error else ''
class AgentCommand(object):
def __init__(self):
pass
def replyerror(func):
@functools.wraps(func)
def wrap(*args, **kwargs):
try:
return func(*args, **kwargs)
except Exception as e:
content = traceback.format_exc()
err = '%s\n%s\nargs:%s' % (str(e), content, pprint.pformat([args, kwargs]))
rsp = AgentResponse()
rsp.success = False
rsp.error = err
logger.warn(err)
            return jsonobject.dumps(rsp)
return wrap
class TestAgentDaemon(daemon.Daemon):
def __init__(self, pidfile):
super(TestAgentDaemon, self).__init__(pidfile)
def run(self):
self.agent = TestAgentServer()
self.agent.start(False)
def build_http_path(ip, path):
return 'http://%s:%s/%s' % (ip, str(TESTAGENT_PORT), path.lstrip('/'))
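# Illustrative (hypothetical IP and path):
#   build_http_path('10.0.0.1', '/host/ping') -> 'http://10.0.0.1:9393/host/ping'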
|
nilq/baby-python
|
python
|
# A POC of a queue manager: two buttons increment and decrement the current value, which is broadcast over Wi-Fi
import machine
from machine import I2C, Pin
import time
import network
#set to True to enable a display, False to disable it
use_display=True
if use_display:
    # display setup: this board has a 128x32 OLED; tune the values to your display
import ssd1306
rst = Pin(16, Pin.OUT)
rst.value(1)
scl = Pin(5, Pin.OUT, Pin.PULL_UP)
sda = Pin(4, Pin.OUT, Pin.PULL_UP)
i2c = I2C(scl=scl, sda=sda, freq=450000)
oled = ssd1306.SSD1306_I2C(128, 32, i2c, addr=0x3c)
#service function to redraw a display with a string
def draw_display(text):
if use_display:
oled.fill(0)
oled.text(str(text),5,15)
oled.show()
    else:
        print(text)
#service function to generate the network name
def essid_rename(actual_counter):
essid=essid_base+str(actual_counter)
ap_if.config(essid=essid, authmode=network.AUTH_WPA_WPA2_PSK, password='some random char 12345678900000**')
ap_if.active(True)
#setup the button up to pin 12
button_up = machine.Pin(12, machine.Pin.IN, machine.Pin.PULL_UP)
#setup the button down to pin 0
button_down = machine.Pin(0, machine.Pin.IN, machine.Pin.PULL_UP)
# seconds of sleep between consecutive button presses, to debounce multiple readings
button_pause=0.1
#counter value
counter=0
# combo button status, to avoid incrementing or decrementing the counter after a combo press
combo=False
# set up the wireless interface
ap_if = network.WLAN(network.AP_IF)
#configure a string for the essid base name
essid_base="It's the turn of:"
# show the initial prompt on the display
draw_display("Press a button")
print("Press a button...")
#let's start an infinite loop to keep checking the status of the buttons
while True:
#reset function, pressing both buttons will reset the counter to 0
if not button_up.value() and not button_down.value():
print('Combo Button pressed!', counter)
counter=0
combo=True
draw_display('Reset complete')
time.sleep(2)
draw_display('We serve:'+str(counter))
essid_rename(counter)
if not button_up.value() and not combo:#up button counter
counter+=1
print('Button up pressed!', counter)
draw_display('We serve:'+str(counter))
essid_rename(counter)
time.sleep(button_pause)
if not button_down.value() and not combo:#down button counter plus negative number check
if counter>0:
counter-=1
else:
counter=0
print('Button down pressed!', counter)
draw_display('We serve:'+str(counter))
essid_rename(counter)
time.sleep(button_pause)
#reset combo button status
combo=False
|
nilq/baby-python
|
python
|
import sys
import PyNexusZipCrawler
# GET NEXUS MODS FILE ID FROM USER INPUT
f_id = raw_input("Enter the Nexus Mods File ID you want to crawl: ")
# CRAWL IT
PyNexusZipCrawler.crawl_zip_content(f_id, "110")
|
nilq/baby-python
|
python
|
import logging
import os
import pyaudio
import wave
from setup_logging import setup_logging
INPUT_DEVICE = 0
MAX_INPUT_CHANNELS = 1 # Max input channels
DEFAULT_SAMPLE_RATE = 44100 # Default sample rate of microphone or recording device
DURATION = 5 # seconds to record
CHUNK_SIZE = 1024
logger = logging.getLogger('sound')
class Sound(object):
def __init__(self):
# Set default configurations for recording device
# sd.default.samplerate = DEFAULT_SAMPLE_RATE
# sd.default.channels = DEFAULT_CHANNELS
self.format = pyaudio.paInt16
self.channels = MAX_INPUT_CHANNELS
self.sample_rate = DEFAULT_SAMPLE_RATE
self.chunk = CHUNK_SIZE
self.duration = DURATION
self.path = os.path.join(os.getcwd(), "recorded0.wav")
self.device = INPUT_DEVICE
self.frames = []
self.audio = pyaudio.PyAudio()
self.device_info()
print()
logger.info("Audio device configurations currently used")
logger.info(f"Default input device index = {self.device}")
logger.info(f"Max input channels = {self.channels}")
logger.info(f"Default samplerate = {self.sample_rate}")
def device_info(self):
num_devices = self.audio.get_device_count()
keys = ['name', 'index', 'maxInputChannels', 'defaultSampleRate']
logger.info(f"List of System's Audio Devices configurations:")
logger.info(f"Number of audio devices: {num_devices}")
for i in range(num_devices):
info_dict = self.audio.get_device_info_by_index(i)
logger.info([(key, value) for key, value in info_dict.items() if key in keys])
def record(self):
# start Recording
self.audio = pyaudio.PyAudio()
stream = self.audio.open(
format=self.format,
channels=self.channels,
rate=self.sample_rate,
input=True,
frames_per_buffer=self.chunk,
input_device_index=self.device)
logger.info(f"Recording started for {self.duration} seconds")
self.frames = []
for i in range(0, int(self.sample_rate / self.chunk * self.duration)):
data = stream.read(self.chunk)
self.frames.append(data)
logger.info ("Recording Completed")
# stop Recording
stream.stop_stream()
stream.close()
self.audio.terminate()
self.save()
def save(self):
waveFile = wave.open(self.path, 'wb')
waveFile.setnchannels(self.channels)
waveFile.setsampwidth(self.audio.get_sample_size(self.format))
waveFile.setframerate(self.sample_rate)
waveFile.writeframes(b''.join(self.frames))
waveFile.close()
logger.info(f"Recording saved to {self.path}")
sound = Sound()
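# Illustrative: uncomment to capture a clip from the default input device and
# write it to recorded0.wav (see save()).
# sound.record()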
|
nilq/baby-python
|
python
|
# -------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# -------------------------------------------------------------
# Autogenerated By : src/main/python/generator/generator.py
# Autogenerated From : scripts/builtin/cox.dml
from typing import Dict, Iterable
from systemds.operator import OperationNode, Matrix, Frame, List, MultiReturn, Scalar
from systemds.script_building.dag import OutputType
from systemds.utils.consts import VALID_INPUT_TYPES
def cox(X: Matrix,
TE: Matrix,
F: Matrix,
R: Matrix,
**kwargs: Dict[str, VALID_INPUT_TYPES]):
"""
    This script fits a Cox proportional hazard regression model.
    The Breslow method is used for handling ties and the regression parameters
    are computed using a trust-region Newton method with conjugate gradient.
:param X: Location to read the input matrix X containing the survival data
containing the following information
1: timestamps
2: whether an event occurred (1) or data is censored (0)
3: feature vectors
:param TE: Column indices of X as a column vector which contain timestamp
(first row) and event information (second row)
:param F: Column indices of X as a column vector which are to be used for
fitting the Cox model
:param R: If factors (categorical variables) are available in the input matrix
X, location to read matrix R containing the start and end indices of
the factors in X
R[,1]: start indices
R[,2]: end indices
Alternatively, user can specify the indices of the baseline level of
each factor which needs to be removed from X; in this case the start
and end indices corresponding to the baseline level need to be the same;
if R is not provided by default all variables are considered to be continuous
:param alpha: Parameter to compute a 100*(1-alpha)% confidence interval for the betas
:param tol: Tolerance ("epsilon")
:param moi: Max. number of outer (Newton) iterations
:param mii: Max. number of inner (conjugate gradient) iterations, 0 = no max
:return: A D x 7 matrix M, where D denotes the number of covariates, with the following schema:
M[,1]: betas
M[,2]: exp(betas)
M[,3]: standard error of betas
M[,4]: Z
M[,5]: P-value
M[,6]: lower 100*(1-alpha)% confidence interval of betas
M[,7]: upper 100*(1-alpha)% confidence interval of betas
:return: Two matrices containing a summary of some statistics of the fitted model:
1 - File S with the following format
- row 1: no. of observations
- row 2: no. of events
- row 3: log-likelihood
- row 4: AIC
- row 5: Rsquare (Cox & Snell)
- row 6: max possible Rsquare
2 - File T with the following format
- row 1: Likelihood ratio test statistic, degree of freedom, P-value
- row 2: Wald test statistic, degree of freedom, P-value
- row 3: Score (log-rank) test statistic, degree of freedom, P-value
:return: Additionally, the following matrices are stored (needed for prediction)
1- A column matrix RT that contains the order-preserving recoded timestamps from X
2- Matrix XO which is matrix X with sorted timestamps
3- Variance-covariance matrix of the betas COV
4- A column matrix MF that contains the column indices of X with the baseline factors removed (if available)
"""
params_dict = {'X': X, 'TE': TE, 'F': F, 'R': R}
params_dict.update(kwargs)
vX_0 = Matrix(X.sds_context, '')
vX_1 = Matrix(X.sds_context, '')
vX_2 = Matrix(X.sds_context, '')
vX_3 = Matrix(X.sds_context, '')
vX_4 = Matrix(X.sds_context, '')
vX_5 = Matrix(X.sds_context, '')
output_nodes = [vX_0, vX_1, vX_2, vX_3, vX_4, vX_5, ]
op = MultiReturn(X.sds_context, 'cox', output_nodes, named_input_nodes=params_dict)
vX_0._unnamed_input_nodes = [op]
vX_1._unnamed_input_nodes = [op]
vX_2._unnamed_input_nodes = [op]
vX_3._unnamed_input_nodes = [op]
vX_4._unnamed_input_nodes = [op]
vX_5._unnamed_input_nodes = [op]
return op
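# Illustrative usage sketch (my assumption, not part of the generated file):
# a SystemDSContext plus input matrices built to the layout described above.
#   from systemds.context import SystemDSContext
#   with SystemDSContext() as sds:
#       X = sds.from_numpy(survival_data)   # hypothetical numpy arrays
#       TE = sds.from_numpy(te); F = sds.from_numpy(f); R = sds.from_numpy(r)
#       results = cox(X, TE, F, R).compute()  # MultiReturn -> list of outputs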
|
nilq/baby-python
|
python
|
from flask import Flask, jsonify, render_template
import json
import numpy as np
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.orm import Session
from sqlalchemy import create_engine, func
engine = create_engine("sqlite:///Resources/hawaii.sqlite")
# reflect an existing database into a new model
Base = automap_base()
# reflect the tables
Base.prepare(engine, reflect=True)
# Save references to each table
Measurement = Base.classes.measurement
Station = Base.classes.station
app = Flask(__name__)
@app.route("/")
def index():
return render_template("index.html", title="SQLAlchemy API Homework with Navigation")
@app.route("/api/v1.0/precipitation")
def precipitation():
session = Session(engine)
results = session.query(Measurement.date,Measurement.prcp).\
order_by(Measurement.date).all()
session.close()
precipitation = list(np.ravel(results))
precipitation = {precipitation[i]: precipitation[i + 1] for i in range(0, len(precipitation), 2)}
return render_template('index2.html', jsonfile=json.dumps(precipitation))
@app.route("/api/v1.0/precipitation2")
def precipitation2():
session = Session(engine)
results = session.query(Measurement.date,Measurement.prcp).\
order_by(Measurement.date).all()
session.close()
precipitation = list(np.ravel(results))
precipitation = {precipitation[i]: precipitation[i + 1] for i in range(0, len(precipitation), 2)}
return jsonify(precipitation)
@app.route("/api/v1.0/stations")
def stations():
session = Session(engine)
results = session.query(Station.station).\
order_by(Station.station).all()
session.close()
stations = list(np.ravel(results))
return render_template('index2.html', jsonfile=json.dumps(stations))
@app.route("/api/v1.0/stations2")
def stations2():
session = Session(engine)
results = session.query(Station.station).\
order_by(Station.station).all()
session.close()
stations = list(np.ravel(results))
return jsonify(stations)
@app.route("/api/v1.0/tobs")
def tobs():
session = Session(engine)
results = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date >= '2016-08-23').\
order_by(Measurement.date).all()
session.close()
tobs = list(np.ravel(results))
tobs = {tobs[i]: tobs[i + 1] for i in range(0, len(tobs), 2)}
return render_template('index2.html', jsonfile=json.dumps(tobs))
@app.route("/api/v1.0/tobs2")
def tobs2():
session = Session(engine)
results = session.query(Measurement.date, Measurement.tobs).\
filter(Measurement.date >= '2016-08-23').\
order_by(Measurement.date).all()
session.close()
tobs = list(np.ravel(results))
tobs = {tobs[i]: tobs[i + 1] for i in range(0, len(tobs), 2)}
return jsonify(tobs)
@app.route("/api/v1.0/<start_date>")
def data_start_date(start_date):
session = Session(engine)
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).all()
session.close()
    summaries = []
    for tmin, tavg, tmax in results:
        summary = {}
        summary["Minimum_Temp"] = tmin
        summary["AVG_Temp"] = tavg
        summary["Max_Temp"] = tmax
        summaries.append(summary)
    return jsonify(summaries)
@app.route("/api/v1.0/<start_date>/<end_date>")
def data_start_end_date(start_date, end_date):
session = Session(engine)
results = session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs)).\
filter(Measurement.date >= start_date).filter(Measurement.date <= end_date).all()
session.close()
    summaries = []
    for tmin, tavg, tmax in results:
        summary = {}
        summary["Minimum_Temp"] = tmin
        summary["AVG_Temp"] = tavg
        summary["Max_Temp"] = tmax
        summaries.append(summary)
    return jsonify(summaries)
if __name__ == "__main__":
app.run(debug=True)
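# Illustrative (assuming the default Flask dev server on port 5000; dates are
# placeholders in the dataset's YYYY-MM-DD format):
#   curl http://127.0.0.1:5000/api/v1.0/precipitation2
#   curl http://127.0.0.1:5000/api/v1.0/2017-01-01/2017-01-07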
|
nilq/baby-python
|
python
|
class User():
def __init__(self, first_name, last_name, gender, email):
self.first_name = first_name
self.last_name = last_name
self.gender = gender
self.email = email
self.login_attempts = 0
def describe_user(self):
print(self.first_name)
print(self.last_name)
print(self.gender)
print(self.email)
def greet_user(self):
print("Hello "+ self.first_name.title())
def increment_login_attempts(self):
self.login_attempts += 1
def reset_login_attempts(self):
self.login_attempts = 0
user1 = User('Karandeep', 'Bhardwaj', 'male', 'karandiip@gmail.com')
user2 = User('Jaya', 'Sachdeva', 'female', 'jaya9.js@gmail.com')
user3 = User('Megha', 'Bhardwaj', 'female', 'megha@gmail.com')
def print_for_user(o):
o.describe_user()
class Privileges():
    def __init__(self, privileges):
        self.privileges = privileges
def show_privileges(self):
for privilege in self.privileges:
print(privilege)
class Admin(User):
def __init__(self, first_name, last_name, gender, email):
super().__init__(first_name, last_name, gender, email)
self.privileges = ['can add post', 'can delete post', 'can ban user', 'can reset the password for user']
self.privilege = Privileges(self.privileges)
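# Illustrative demo (example data): exercise the classes defined above.
admin = Admin('Sam', 'Lee', 'male', 'sam@example.com')
admin.greet_user()
admin.privilege.show_privileges()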
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import pod
import sys, binascii
def ConvertMac(dotted):
    if dotted.find(":") == -1:
        mac = binascii.unhexlify(dotted)
    else:
        mac = "".join([chr(int(i, 16)) for i in dotted.split(":")])
    if len(mac) != 6:
        raise ValueError("Not a MAC address")
    return mac
def Help():
print "Usage: getmac.py"
print "Copies mac to mac.bin"
sys.exit(-1)
if __name__ == "__main__":
p = pod.Pod("turbo")
p.GetMac()
p.Close()
|
nilq/baby-python
|
python
|
from typing import List
from ..codes import *
from dataclasses import dataclass
@dataclass(repr=True, eq=True)
class OppoCommand:
"""Represents a command to an OppoDevice"""
code: OppoCodeType
_parameters: List[str]
_response_codes: List[str]
def __init__(self, code: OppoCodeType, parameters: List[str] = None, response_codes: List[str] = None):
if parameters is None:
parameters = []
if response_codes is None:
response_codes = []
self.code = self._translate(code)
self._parameters = parameters
self._response_codes = response_codes + [self.code.value]
def encode(self):
params = ""
if len(self._parameters) > 0:
params = " " + " ".join(list(map(str, self._parameters)))
return f"#{self.code.value}{params}\r".encode()
@property
def expected_response_codes(self):
return self._response_codes
def _translate(self, code: OppoCodeType):
if isinstance(code, str):
return OppoCode(code)
return code
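# Illustrative (assuming ..codes defines a code whose value is "PON"):
#   OppoCommand("PON", ["1"]).encode() -> b"#PON 1\r"
# and the resolved code's own value is appended to expected_response_codes.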
|
nilq/baby-python
|
python
|
from django.urls import path
from rest_framework import routers
from .views import *
router = routers.DefaultRouter()
router.register('notes', NoteViewSet, basename='notes')
router.register('projects', ProjectViewSet, basename='projects')
router.register('habits', HabitViewSet, basename='habits')
urlpatterns = router.urls + [
path('subtasks/', SubtaskViewSet.as_view({'post': 'create'})),
path('subtasks/<int:pk>/', SubtaskViewSet.as_view({'put': 'update',
'patch': 'partial_update',
'delete': 'destroy'}))
]
|
nilq/baby-python
|
python
|
import torch.nn as nn
import torch
from Postional import PositionalEncoding
class TransAm(nn.Module):
def __init__(self, feature_size=200, num_layers=1, dropout=0.1):
super(TransAm, self).__init__()
self.model_type = 'Transformer'
self.src_mask = None
self.pos_encoder = PositionalEncoding(feature_size)
self.encoder_layer = nn.TransformerEncoderLayer(d_model=feature_size, nhead=10, dropout=dropout)
self.transformer_encoder = nn.TransformerEncoder(self.encoder_layer, num_layers=num_layers)
self.decoder = nn.Linear(feature_size, 1)
self.init_weights()
def init_weights(self):
initrange = 0.1
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
if self.src_mask is None or self.src_mask.size(0) != len(src):
device = src.device
mask = self._generate_square_subsequent_mask(len(src)).to(device)
self.src_mask = mask
src = self.pos_encoder(src)
        output = self.transformer_encoder(src, self.src_mask)
output = self.decoder(output)
return output
def _generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
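# Illustrative smoke test (my assumption: PositionalEncoding accepts
# (sequence, batch, feature) tensors, matching nn.TransformerEncoder's default):
#   model = TransAm()
#   src = torch.rand(30, 8, 200)   # (sequence, batch, feature_size)
#   out = model(src)               # expected shape: (30, 8, 1)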
|
nilq/baby-python
|
python
|
# just a package
|
nilq/baby-python
|
python
|
class Solution:
    def isPalindrome(self, x: int) -> bool:
        temp = str(x)
        length = len(temp)
        # Compare mirrored characters; any mismatch means not a palindrome.
        for i in range(length // 2):
            if temp[i] != temp[length - 1 - i]:
                return False
        return True
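# Illustrative: Solution().isPalindrome(121) -> True, isPalindrome(123) -> False.
# Negative numbers are rejected by the string check, e.g. -121 -> False.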
|
nilq/baby-python
|
python
|
from django.contrib import admin
from .models import Pokemon
admin.site.register(Pokemon)
|
nilq/baby-python
|
python
|
#!/usr/local/bin/python3
#-*- encoding: utf-8 -*-
from flask import Flask
from flask_restx import Api
from setting import config
from app.api.client import worker_client
def run():
app = Flask(__name__)
api = Api(
app,
version='dev_0.1',
title='Integrated Worker Server API',
        description='Worker command server',
terms_url="/",
contact="unripedata@gmail.com",
license="MIT",
url_scheme='http'
)
api.add_namespace(worker_client, '/worker/client')
app.run(host=config.HOST, port=config.PORT, debug=config.DEBUG, use_reloader=False)
# api.add_namespace(command_check, '/command/check')
# api.add_namespace(command_check, '/client/check')
# api.add_namespace(order_worker_client_clientOrder_check, '/order/client')
# api.add_namespace(ra_command_sync_client_device, '/save/client/device')
|
nilq/baby-python
|
python
|
# ==============================CS-199==================================
# FILE: MyAI.py
#
# AUTHOR: Vaibhav Yengul
#
# DESCRIPTION: This file contains the MyAI class. You will implement your
# agent in this file. You will write the 'getAction' function,
# the constructor, and any additional helper functions.
#
# NOTES: - MyAI inherits from the abstract AI class in AI.py.
#
# - DO NOT MAKE CHANGES TO THIS FILE.
# ==============================CS-199==================================
from AI import AI
from Action import Action
import random
class TileInfo:
    def __init__(self, number, _uncover):
        self.number = number
        self.uncover = _uncover
        self.voteNumber = 0
class MyAI(AI):
def __init__(self, rowDimension, colDimension, totalMines, startX, startY):
self.rows = colDimension
self.cols = rowDimension
self.totalMines = totalMines
self.minesLeft = totalMines
self.prev_x = startX
self.prev_y = startY
self.Tiles = [[TileInfo(-10, False) for j in range(self.cols)] for i in range(self.rows)]
self.queue = []
self.voteq = []
self.debug = False
self.uncoverCount = 0
def getAction(self, number: int) -> "Action Object":
newx, newy = self.prev_x, self.prev_y
(self.Tiles[newx][newy]).number = number
(self.Tiles[newx][newy]).uncover = True
self.uncoverCount += 1
top_left = (newx - 1, newy + 1)
top_right = (newx + 1, newy + 1)
top = (newx, newy + 1)
left = (newx - 1, newy)
right = (newx + 1, newy)
bt_left = (newx - 1, newy - 1)
bt = (newx, newy - 1)
bt_right = (newx + 1, newy - 1)
        listof = [top, top_left, top_right, left, right, bt, bt_left, bt_right]
if number == 0:
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols:
self.Tiles[move[0]][move[1]].voteNumber = -1
elif number > 0:
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols and self.Tiles[move[0]][move[1]].voteNumber!=-1:
self.Tiles[move[0]][move[1]].voteNumber += 1
if number == -1:
self.minesLeft -= 1
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols:
if self.Tiles[move[0]][move[1]].number > 0:
self.Tiles[move[0]][move[1]].number -= 1
elif number > 0:
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols:
if self.Tiles[move[0]][move[1]].number == -1:
self.Tiles[newx][newy].number -= 1
queue2 = []
if number == 0:
for x1 in range(newx - 1, newx + 2):
for y1 in range(newy - 1, newy + 2):
if 0 <= x1 < self.rows and 0 <= y1 < self.cols:
if x1 == newx and y1 == newy:
continue
queue2.append([x1, y1, AI.Action.UNCOVER])
"""
if number == 1:
for x in range(newx-2, newx+3):
queue2.append([x, newy-2])
queue2.append([x, newy + 2])
queue2.extend([[newx-2, newy-1], [newx-2, newy], [newx-2, newy+1], [newx+2, newy-1],[newx+2, newy], [newx+2, newy+1]]);
"""
queue3 = []
for c in queue2:
if self.rows > c[0] >= 0 and self.cols > c[1] >= 0 and not (self.Tiles[c[0]][c[1]]).uncover:
                queue3.append(c)
for a in queue3:
found = False
for item in self.queue:
if (a[0] == item[0] and a[1] == item[1]):
found = True
                    break
if not found:
                self.queue.append(a)
# print(" ; ".join(str(i) for i in self.queue))
if self.debug:
            self.printBoard()
action = -10
inval = 0
while action == -10 and inval < 10:
action = self.getNextAct(action)
inval += 1
if (action == -10):
cnt, ctb = 0, 0
nx, ny, nnx, nny = -1, -1, -1, -1
for x in range(self.rows):
for y in range(self.cols):
if self.Tiles[x][y].number == -1:
ctb += 1
nnx, nny = x, y
if self.Tiles[x][y].number == -10:
cnt += 1
nx, ny = x, y
if cnt == 1:
self.prev_x = nx
self.prev_y = ny
action = AI.Action.UNCOVER if ctb == self.totalMines else AI.Action.FLAG
if self.debug:
print(action, self.prev_x,self.prev_y,"\n")
                return Action(action, nx, ny)
if cnt == 0:
self.prev_x = nnx
self.prev_y = nny
action = AI.Action.UNCOVER
if self.debug:
print(action, self.prev_x, self.prev_y, "\n")
return Action(AI.Action.UNCOVER, nnx, nny)
portion = 2/3
if self.rows == 30:
portion = 4/5
if(action == -10 and self.uncoverCount > (portion * self.rows*self.cols)):
if not self.Tiles[self.rows-1][self.cols-1].uncover:
self.prev_x = self.rows-1
self.prev_y = self.cols - 1
action = AI.Action.UNCOVER
elif not self.Tiles[self.rows-1][0].uncover:
self.prev_x = self.rows-1
self.prev_y = 0
action = AI.Action.UNCOVER
elif not self.Tiles[0][self.cols-1].uncover:
self.prev_x = 0
self.prev_y = self.cols - 1
action = AI.Action.UNCOVER
elif not self.Tiles[0][0].uncover:
self.prev_x = 0
self.prev_y = 0
action = AI.Action.UNCOVER
if (action == -10):
# add voting mechanism
self.recalculateVotes()
a = random.choice(self.voteq)
self.prev_x = a[0]
self.prev_y = a[1]
action = a[2]
if self.debug:
print(action, self.prev_x,self.prev_y,"\n")
        return Action(action, self.prev_x, self.prev_y)
def recalculateVotes(self):
self.voteq.clear()
if self.debug:
self.printVoteBoard()
max = -100
min = 100
xmax, ymax = [], []
xmin, ymin = [], []
for a in range(self.rows):
for b in range(self.cols):
if self.Tiles[a][b].number != -10 or self.Tiles[a][b].uncover: continue
if self.Tiles[a][b].voteNumber > max:
max = self.Tiles[a][b].voteNumber
xmax = [a]
ymax = [b]
elif self.Tiles[a][b].voteNumber == max:
xmax.append(a)
ymax.append(b)
if self.Tiles[a][b].voteNumber ==0:
continue
if self.Tiles[a][b].voteNumber < min :
min = self.Tiles[a][b].voteNumber
xmin = [a]
ymin = [b]
elif self.Tiles[a][b].voteNumber == min:
xmin.append(a)
ymin.append(b)
for i in range(len(xmax)):
self.voteq.append([xmax[i], ymax[i], AI.Action.FLAG])
        break
def printBoard(self):
print("\n")
for i in range(self.rows):
print("\t".join([str(x.number) for x in self.Tiles[i]]))
print("\n")
def printVoteBoard(self):
print("\n")
for i in range(self.rows):
vb = [str(x.voteNumber) for x in self.Tiles[i]]
vb = [str(t) if self.Tiles[i][j].number == -10 else str(-1) for j, t in enumerate(vb)]
print("\t".join(vb))
print("\n")
def getNextAct(self, action):
if (len(self.queue) and action == -10):
a = self.queue.pop(0)
self.prev_x = a[0]
self.prev_y = a[1]
if self.Tiles[a[0]][a[1]].uncover:
action = -10
else:
action = a[2]
if action == -10 and len(self.queue) == 0:
self.fillqueue()
queue3 = []
for c in self.queue:
if self.rows > c[0] >= 0 and self.cols > c[1] >= 0 and not (self.Tiles[c[0]][c[1]]).uncover:
queue3.append(c)
self.queue = queue3
if (len(self.queue)):
                a = self.queue.pop(0)
self.prev_x = a[0]
self.prev_y = a[1]
action = a[2]
        return action
def fillqueue(self):
for y in range(1, self.cols - 1):
if self.Tiles[self.rows - 2][y].number == -10 or self.Tiles[self.rows - 2][y].number == -1 or \
self.Tiles[self.rows - 2][y].number == 0: continue
self.identifyPatterns(self.rows - 2, y)
if not self.queue:
for y in range(1, self.cols - 1):
if self.Tiles[1][y].number == -10 or self.Tiles[1][y].number == 0 or self.Tiles[1][
y].number == -1: continue
self.identifyPatterns2(1, y)
if not self.queue:
for x in range(1, self.rows - 1):
if self.Tiles[x][1].number == -10 or self.Tiles[x][1].number == 0 or self.Tiles[x][1].number == -1:
continue
self.identifyPatterns4(x, 1)
if not self.queue:
for x in range(1, self.rows - 1):
if self.Tiles[x][self.cols - 2].number == -10 or self.Tiles[x][self.cols - 2].number == 0 or \
self.Tiles[x][self.cols - 2].number == -1: continue
self.identifyPatterns5(x, self.cols - 2)
if not self.queue:
for x in range(1, self.rows - 1):
for y in range(1, self.cols - 1):
if self.Tiles[x][y].number == -10 or self.Tiles[x][y].number == 0 or self.Tiles[x][
y].number == -1: continue
self.identifyPatterns3(x, y)
if not self.queue:
for y in range(1, self.cols - 1):
if self.Tiles[0][y].number == -10 or self.Tiles[0][y].number == 0 or self.Tiles[0][
y].number == -1: continue
# row 0
if self.Tiles[0][y].number == 1 and [t.uncover for t in self.Tiles[1][y - 1:y + 2]] == [True, True,
True] and \
self.Tiles[0][y - 1].uncover and not self.Tiles[0][y + 1].uncover:
self.queue.append([0, y + 1, AI.Action.FLAG])
elif self.Tiles[0][y].number == 1 and [t.uncover for t in self.Tiles[1][y - 1:y + 2]] == [True, True,
True] and \
self.Tiles[0][y + 1].uncover and not self.Tiles[0][y - 1].uncover:
self.queue.append([0, y - 1, AI.Action.FLAG])
for y in range(1, self.cols - 1):
g = self.rows - 1
if self.Tiles[g][y].number == -10 or self.Tiles[g][y].number == 0 or self.Tiles[g][
y].number == -1: continue
if self.Tiles[g][y].number == 1 and [t.uncover for t in self.Tiles[g - 1][y - 1:y + 2]] == [True, True,
True] and \
self.Tiles[g][y - 1].uncover and not self.Tiles[g][y + 1].uncover:
self.queue.append([g, y + 1, AI.Action.FLAG])
elif self.Tiles[g][y].number == 1 and [t.uncover for t in self.Tiles[g - 1][y - 1:y + 2]] == [True,
True,
True] and \
self.Tiles[g][y + 1].uncover and not self.Tiles[g][y - 1].uncover:
self.queue.append([g, y - 1, AI.Action.FLAG])
for x in range(1, self.rows - 1):
            if self.Tiles[x][0].number == -10 or self.Tiles[x][0].number == 0 or \
                    self.Tiles[x][0].number == -1: continue
# print([t[0].uncover for t in self.Tiles[x - 1:x + 2]])
# col-0
if self.Tiles[x][0].number == 1 and [t[0].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x + 1][1].uncover and self.Tiles[x][1].uncover and not self.Tiles[x - 1][1].uncover:
self.queue.append([x - 1, 1, AI.Action.FLAG])
elif self.Tiles[x][0].number == 1 and [t[0].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x - 1][1].uncover and self.Tiles[x][1].uncover and not self.Tiles[x + 1][1].uncover:
self.queue.append([x + 1, 1, AI.Action.FLAG])
for x in range(1, self.rows - 1):
g = self.cols - 1
# col-last
# print([t[g].uncover for t in self.Tiles[x - 1:x + 2]])
if self.Tiles[x][g].number == 1 and [t[g].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x + 1][g - 1].uncover and self.Tiles[x][g - 1].uncover and not self.Tiles[x - 1][
g - 1].uncover:
self.queue.append([x - 1, g - 1, AI.Action.FLAG])
elif self.Tiles[x][g].number == 1 and [t[g].uncover for t in self.Tiles[x - 1:x + 2]] == [True, True,
True] and \
self.Tiles[x - 1][g - 1].uncover and self.Tiles[x][g - 1].uncover and not self.Tiles[x + 1][
g - 1].uncover:
self.queue.append([x + 1, g - 1, AI.Action.FLAG])
if not self.queue:
self.fillqueue2()
if not self.queue:
corners = {"tl":[1,1], "tr":[1, self.cols-2], "bl":[self.rows-2, 1], "br":[self.rows-2, self.cols-2]}
for c in corners.keys():
                self.identifyCornerPatterns(c, corners[c][0], corners[c][1])
def fillqueue2(self):
for x1 in range(self.rows):
for y1 in range(self.cols):
if self.Tiles[x1][y1].uncover and self.Tiles[x1][y1].number == 0:
top_left = (x1 - 1, y1 + 1)
top_right = (x1 + 1, y1 + 1)
top = (x1, y1 + 1)
left = (x1 - 1, y1)
right = (x1 + 1, y1)
bt_left = (x1 - 1, y1 - 1)
bt = (x1, y1 - 1)
bt_right = (x1 + 1, y1 - 1)
                    listof = [top, top_left, top_right, left, right, bt, bt_left, bt_right]
for move in listof:
if 0 <= move[0] < self.rows and 0 <= move[1] < self.cols and self.Tiles[move[0]][
move[1]].number == -10 and not self.Tiles[move[0]][move[1]].uncover \
and self.Tiles[move[0]][move[1]].number != -1:
self.queue.append([move[0], move[1], AI.Action.UNCOVER])
    def identifyCornerPatterns(self, corner, x, y):
        if self.minesLeft > 2:
return
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if corner=="bl" and pat2==[[True,True,True], [False,False,True], [False,False,True]]:
if (pat[1][2]==1 or pat[2][2]==1) and (pat[0][0]==1 or pat[0][1]==1) and self.minesLeft==2:
self.queue.append([x, y-1, AI.Action.FLAG])
self.queue.append([x+1, y, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
elif corner=="tr" and pat2==[[True,False,False], [True,False,False], [True,True,True]]:
if (pat[2][1]==1 or pat[2][2]==1) and (pat[0][0]==1 or pat[1][0]==1) and self.minesLeft==2:
self.queue.append([x-1, y, AI.Action.FLAG])
self.queue.append([x, y+1, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
elif corner=="br" and pat2==[[True,True,True], [True,False,False], [True,False,False]]:
if (pat[1][0]==1 or pat[2][0]==1) and (pat[0][1]==1 or pat[0][2]==1) and self.minesLeft==2:
self.queue.append([x+1, y, AI.Action.FLAG])
self.queue.append([x, y+1, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
elif corner=="tl" and pat2==[[False,False,True], [False,False,True], [True,True,True]]:
if (pat[1][2]==1 or pat[0][2]==1) and (pat[2][0]==1 or pat[2][1]==1) and self.minesLeft==2:
self.queue.append([x+1, y, AI.Action.FLAG])
self.queue.append([x, y-1, AI.Action.FLAG])
else:
self.queue.append([x, y, AI.Action.FLAG])
def isValidTile(self, a, b):
return 0<=a<self.rows and 0<=b<self.cols
def identifyPatterns3(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
# 1-2-1 pattern
if pat[1] == [1, 2, 1] and pat2[2] == [True, True, True] and not pat2[0][0] and not pat2[0][2]:
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif pat[1] == [1, 2, 1] and pat2[0] == [True, True, True] and not pat2[2][0] and not pat2[2][2]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
elif [t[1] for t in pat] == [1, 2, 1] and [t[0] for t in pat2] == [True, True, True] and not pat2[0][
2] and not pat2[2][2]:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif [t[1] for t in pat] == [1, 2, 1] and [t[2] for t in pat2] == [True, True, True] and not pat2[0][
0] and not pat2[2][0]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
#mirror done
elif pat[1][1]==2 and pat[1][2]==1 and pat2[2] == [True, True, True] and pat2[1][0] and \
pat2[0]==[False,False,False]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
elif pat[1][1]==2 and pat[1][0]==1 and pat2[2] == [True, True, True] and pat2[1][2] and \
pat2[0]==[False,False,False]:
self.queue.append([x-1, y+1, AI.Action.FLAG])
#mirror done
elif pat[1][1]==2 and pat[2][1]==1 and [t[0] for t in pat2] == [True, True, True] and pat2[0][1] and \
[t[2] for t in pat2]==[False,False,False]:
self.queue.append([x-1, y+1, AI.Action.FLAG])
elif pat[1][1]==2 and pat[0][1]==1 and [t[0] for t in pat2] == [True, True, True] and pat2[2][1] and \
[t[2] for t in pat2]==[False,False,False]:
self.queue.append([x+1, y+1, AI.Action.FLAG])
#mirror done
elif pat[1][1]==2 and pat[0][1]==1 and [t[2] for t in pat2] == [True, True, True] and pat2[2][1] and \
[t[0] for t in pat2]==[False,False,False]:
self.queue.append([x+1, y-1, AI.Action.FLAG])
elif pat[1][1]==2 and pat[2][1]==1 and [t[2] for t in pat2] == [True, True, True] and pat2[0][1] and \
[t[0] for t in pat2]==[False,False,False]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
elif pat[1][1] == 2 and pat[1][2] == 1 and pat2[0] == [True, True, True] and pat2[1][0] and \
pat2[2] == [False, False, False]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
elif pat[1][1] == 2 and pat[1][0] == 1 and pat2[0] == [True, True, True] and pat2[1][2] and \
pat2[2] == [False, False, False]:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
elif pat[1][1] == 1 and pat[1][2] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][1] and not pat2[0][2] and \
self.isValidTile(x-1, y+2) and not self.Tiles[x-1][y+2].uncover:
self.queue.append([x - 1, y + 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[1][2] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[2][1] and not pat2[2][2] and \
self.isValidTile(x+1, y+2) and not self.Tiles[x+1][y+2].uncover:
self.queue.append([x + 1, y + 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[2][1] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[1][2] and not pat2[2][2] and \
self.isValidTile(x+2, y+1) and not self.Tiles[x+2][y+1].uncover:
self.queue.append([x + 2, y + 1, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[2][1] == 1 and [t[2] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[1][0] and not pat2[2][0] and \
self.isValidTile(x+2, y-1) and not self.Tiles[x+2][y-1].uncover:
self.queue.append([x + 2, y - 1, AI.Action.UNCOVER])
##
elif pat[1][1] == 1 and pat[1][0] == 1 and [t[2] for t in pat2]==[True, True, True] and \
pat2[0] == [True, True, True] and not pat2[2][0] and not pat2[2][1] and \
self.isValidTile(x+1, y-2) and not self.Tiles[x+1][y-2].uncover:
self.queue.append([x + 1, y - 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[1][0] == 1 and [t[2] for t in pat2] == [True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][0] and not pat2[0][1] and \
self.isValidTile(x - 1, y - 2) and not self.Tiles[x - 1][y - 2].uncover:
self.queue.append([x - 1, y - 2, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[0][1] == 1 and [t[2] for t in pat2]==[True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][0] and not pat2[1][0] and \
self.isValidTile(x-2, y-1) and not self.Tiles[x-2][y-1].uncover:
self.queue.append([x - 2, y - 1, AI.Action.UNCOVER])
elif pat[1][1] == 1 and pat[0][1] == 1 and [t[0] for t in pat2]==[True, True, True] and \
pat2[2] == [True, True, True] and not pat2[0][2] and not pat2[1][2] and \
self.isValidTile(x-2, y+1) and not self.Tiles[x-2][y+1].uncover:
self.queue.append([x - 2, y + 1, AI.Action.UNCOVER])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
def identifyPatterns(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
# print("\nPattern printing:\n");
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and pat[1][0] == -10 and pat[2][1] == 1 and pat[2][0] == -10):
if not self.Tiles[x - 1][y - 1].uncover:
self.queue.append([x - 1, y - 1, AI.Action.UNCOVER])
elif (pat[1][1] == 1 and pat[1][2] == -10 and pat[2][1] == 1 and pat[2][2] == -10):
if not self.Tiles[x - 1][y + 1].uncover:
self.queue.append([x - 1, y + 1, AI.Action.UNCOVER])
elif (pat[1] == [1, 2, 1] and pat2[2] == [False, False, False]):
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][2] == -10 and pat[2][1] == 2 and pat[2][2] == -10):
if not self.Tiles[x + 1][y + 1].uncover:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
if not self.Tiles[x - 1][y - 1].uncover:
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][0] == -10 and pat[2][1] == 2 and pat[2][0] == -10):
if not self.Tiles[x + 1][y - 1].uncover:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
if not self.Tiles[x][y - 1].uncover:
self.queue.append([x, y - 1, AI.Action.FLAG])
elif pat[1] == [1,2,1] and pat2[0] == [False, False, False] and pat2[2] == [True, True, True]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif (pat[0][1] == 2 and pat[1][1] == 2 and pat[1][2] == -10 and pat[2][1] == 1 and pat[2][2] == -10 and pat[0][
2] == -10):
if not self.Tiles[x - 1][y + 1].uncover:
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif (pat[1][0] == 2 and pat[1][1] == 2 and pat[1][2] == 1 and pat[2][0] == -10 and pat[2][1] == -10 and pat[2][
2] == 1):
if not self.Tiles[x + 1][y - 1].uncover:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
if not self.Tiles[x + 1][y].uncover:
self.queue.append([x + 1, y, AI.Action.FLAG])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][0] == 1 and pat[1][1] == 2 and pat[1][2] == 2 and pat[2][0] == 1 and pat[2][1] == -10 and pat[2][
2] == -10):
if not self.Tiles[x + 1][y + 1].uncover:
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
if not self.Tiles[x + 1][y].uncover:
self.queue.append([x + 1, y, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][0] == 1 and pat[2][1] == 1 and pat2[0] == [False, False, False] and not pat2[1][
2] and not pat2[2][2]):
self.queue.append([x - 1, y + 1, AI.Action.UNCOVER])
def identifyPatterns2(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
# print("\nPattern printing:\n");
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
if 0 <= x + i < self.rows and 0 <= y + j < self.cols:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if [t[0] for t in pat2]==[False,False,False] and [t[2] for t in pat2]==[True, True, True] and [t[1] for t in pat] == [1,2,1]:
self.queue.append([x-1,y-1,AI.Action.FLAG])
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
        elif (pat[1][1] == 1 and pat[1][0] == -10 and pat[0][1] == 1 and pat[0][0] == -10):
            if not self.Tiles[x + 1][y - 1].uncover:
                self.queue.append([x + 1, y - 1, AI.Action.UNCOVER])
        elif (pat[1][1] == 1 and pat[1][2] == -10 and pat[0][1] == 1 and pat[0][2] == -10):
            if not self.Tiles[x + 1][y + 1].uncover:
                self.queue.append([x + 1, y + 1, AI.Action.UNCOVER])
elif (pat[1][1] == 2 and pat[1][2] == -10 and pat[0][1] == 2 and pat[0][2] == -10):
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
self.queue.append([x, y + 1, AI.Action.FLAG])
elif (pat[1][1] == 2 and pat[1][0] == -10 and pat[0][1] == 2 and pat[0][0] == -10 and pat2[2] == [True, True, True]
and [t[2] for t in pat2]== [True, True, True]):
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
self.queue.append([x, y - 1, AI.Action.FLAG])
        elif (pat[0][1] == 2 and pat[1][1] == 2 and pat[2][1] == 1 and
              pat[0][2] == -10 and pat[1][2] == -10 and pat[2][2] == -10 and
              pat[0][0] != -10 and pat[1][0] != -10 and pat[2][0] != -10):  # 2 -10
            self.queue.append([x - 1, y + 1, AI.Action.FLAG])  # "2" -10
            self.queue.append([x, y + 1, AI.Action.FLAG])  # 1 -10
elif pat[1] == [1,2,1] and pat2[0] == [False, False, False] and pat2[2] == [True, True, True]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif (pat[0][0] == -10 and pat[1][0] == -10 and pat[2][0] == -10 and pat[0][1] == 1 and pat[1][1] == 2 and
pat[2][1] == 2 and pat[0][2] != -10 and pat[1][2] != -10 and pat[2][2] != -10):
self.queue.append([x + 1, y - 1, AI.Action.FLAG]) # -10 2
elif (pat[1][0] == 1 and pat[1][1] == 2 and pat[1][2] == 2 and pat[0][0] == 1 and pat[0][1] == -10 and pat[0][
2] == -10):
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
self.queue.append([x - 1, y, AI.Action.FLAG])
elif (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
# for i in [-1, 0, 1]:
# print("\t".join([str(pat[i+1][0]), str(pat[1+i][1]), str(pat[i+1][2])]))
def identifyPatterns4(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if [t[0] for t in pat2] == [False, False, False] and [t[1] for t in pat] == [1, 2, 1] and [t[2] for t in pat2] == [True, True, True]:
self.queue.append([x - 1, y - 1, AI.Action.FLAG])
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
elif pat2[2] == [False, False, False] and pat2[0] == [True,True,True] and pat[1][0]==1 and pat[1]==[1, 1, 1]:
self.queue.append([x + 1, y + 1, AI.Action.UNCOVER])
def identifyPatterns5(self, x, y):
pat = [[0 for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat[i + 1][j + 1] = self.Tiles[x + i][y + j].number
notuncvr = []
pat2 = [[False for _ in range(3)] for _ in range(3)]
for i in [-1, 0, 1]:
for j in [-1, 0, 1]:
pat2[i + 1][j + 1] = self.Tiles[x + i][y + j].uncover
if not self.Tiles[x + i][y + j].uncover:
notuncvr.append([x + i, y + j])
if (pat[1][1] == 2 and len(notuncvr) == 2):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 1 and len(notuncvr) == 1):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif (pat[1][1] == 3 and len(notuncvr) == 3):
for nuc in notuncvr:
self.queue.append([nuc[0], nuc[1], AI.Action.FLAG])
elif [t[2] for t in pat2] == [False, False, False] and [t[1] for t in pat] == [1, 2, 1] and [t[0] for t in pat2] == [True, True, True]:
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
elif (pat[1] == [2,1,1] or pat[1]==[1,1,1]) and pat2[0] == [False, False, False] and pat2[2]==[True, True, True]:
self.queue.append([x-1, y-1, AI.Action.UNCOVER])
elif (pat[1] == [2,1,1] or pat[1]==[1,1,1]) and pat2[2] == [False, False, False] and pat2[0]==[True, True, True]:
self.queue.append([x+1, y-1, AI.Action.UNCOVER])
elif pat[1] == [1,2,1] and pat2[0] == [False, False, False] and pat2[2]==[True, True, True]:
self.queue.append([x-1, y-1, AI.Action.FLAG])
self.queue.append([x - 1, y + 1, AI.Action.FLAG])
elif pat[1] == [1,2,1] and pat2[2] == [False, False, False] and pat2[0]==[True, True, True]:
self.queue.append([x + 1, y - 1, AI.Action.FLAG])
self.queue.append([x + 1, y + 1, AI.Action.FLAG])
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import getopt
import os.path
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir)))
from datetime import datetime
from TwitterEngine import instances, BackendChooser
def parseargs(name, argv):
date = datetime.now()
execute = False
try:
opts, _args = getopt.getopt(argv, 'hed:', ['execute', 'date='])
except getopt.GetoptError:
print('%s [-h]' % name)
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print '%s [-d "YYYY-MM-DD [HH:mm:SS]"]' % name
sys.exit()
elif opt in ('-e', '--execute'):
execute = True
elif opt in ('-d', '--date'):
try:
if len(arg) > 10:
date = datetime.strptime(arg, '%Y-%m-%d %H:%M:%S')
else:
date = datetime.strptime(arg, '%Y-%m-%d')
except ValueError as e:
print "Date format accepted: YYYY-MM-DD [HH:mm:SS]"
raise e
return (date, execute)
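# Hedged usage example (script name and date are illustrative):
#   python remove_old_tweets.py -e -d "2015-01-01 12:00:00"
#   -> parseargs(...) returns (datetime(2015, 1, 1, 12, 0, 0), True)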
if __name__ == '__main__':
try:
engine_config = instances.INSTANCES[0]
(max_date, execute) = parseargs(sys.argv[0], sys.argv[1:])
except ValueError:
sys.exit(1)
backend = BackendChooser.GetBackend(engine_config)
print "Calling delete with parameters max_date = %s, execute = %s." % (max_date, execute)
backend.RemoveOldTweets(max_date, execute)
|
nilq/baby-python
|
python
|
from django.contrib import admin
from django.contrib.auth.decorators import login_required, permission_required
from django.urls import path
from . import views
from .api import views as api_views
app_name = 'main_app'
urlpatterns = [
path('api/sites/', api_views.SitesListCreateAPIView.as_view(), name='sites_rest_api'),
path('api/sites/<uuid>', view=api_views.SitesRetrieveUpdateDestroyAPIView.as_view(),
name='sites_rest_api'),
path('api/devices/', api_views.DevicesListCreateAPIView.as_view(), name='devices_rest_api'),
path('api/devices/<uuid>', api_views.DevicesRetrieveUpdateDestroyAPIView.as_view(),
name='devices_rest_api'),
path('', views.HomeTemplateView.as_view(), name='home'),
path('sites', views.SitesListView.as_view(), name='viewsites'),
path('sites/<int:pk>', views.SitesDetailView.as_view(), name='sitesdetail'),
path('updatesite/<int:pk>', views.SiteUpdateView.as_view(), name='updatesite'),
path('deletesite/<int:pk>', views.SiteDeleteView.as_view(), name='deletesite'),
path('devices', views.DeviceListView.as_view(), name='viewdevices'),
path('create/', views.SiteCreateView.as_view(), name='createsite'),
path('create_device/', views.DeviceCreateView.as_view(), name='createdevice'),
path('devices/<int:pk>', views.DeviceDetailView.as_view(), name='devicedetail'),
path('devices/config/<int:pk>', views.DeviceConfigDetailView.as_view(), name='deviceconfig'),
path('devices/script/<int:pk>', views.DeviceScriptDetailView.as_view(), name='devicescript'),
path('updatedevice/<int:pk>', views.DeviceUpdateView.as_view(), name='updatedevice'),
path('deletedevice/<int:pk>', views.DeviceDeleteView.as_view(), name='deletedevice'),
path('deleteconfig/config/<int:pk>', views.DeviceConfigDeleteView.as_view(), name='deleteconfig'),
path('devices/syncconfig/<deviceip>&<deviceid>', views.sync_configuration, name='configsync'),
path('devices/platformsync/<deviceip>&<deviceid>', views.get_platform_detail, name='platformsync'),
path('search/', views.device_search_function, name='search'),
path('devices/syncvlans/<deviceip>&<deviceid>', views.sync_device_vlans, name='vlanssync'),
path('devices/tasks/vlanchange/<deviceip>&<deviceid>', views.port_vlan_assignment, name='vlanchange'),
]
|
nilq/baby-python
|
python
|
import numpy as np
from features.DetectorDescriptorTemplate import DetectorDescriptorBundle
from features.cv_sift import cv_sift
class SiftDetectorDescriptorBundle(DetectorDescriptorBundle):
def __init__(self, descriptor):
sift = cv_sift()
super(SiftDetectorDescriptorBundle, self).__init__(sift, descriptor)
self.is_detector = True
self.is_descriptor = True
self.is_both = True
self.csv_flag = False
self.patch_input = True
def detect_feature(self, image):
return self.detector.detect_feature_cv_kpt(image)
def extract_descriptor(self, image, feature):
return self.descriptor.extract_descriptor(image, feature)
def extract_all(self, image):
feature = self.detector.detect_feature_cv_kpt(image)
descriptor_vector = self.descriptor.extract_descriptor(
image, feature)
return feature, descriptor_vector
|
nilq/baby-python
|
python
|
import torch
import torch.nn.functional as F
import torch.nn as nn
from transformers import BertModel
# Helper to compute the output length from the Convolution / MaxPooling layers
def out_size(sequence_length, filter_size, padding = 0, dilation = 1, stride = 1):
length = sequence_length + 2 * padding - dilation * (filter_size - 1) - 1
length = int(length/stride)
return length + 1
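# Worked example with illustrative values: out_size(128, 3) == 126 and
# out_size(128, 3, stride=2) == 63, i.e. floor((L + 2*padding - dilation*(k-1) - 1) / stride) + 1.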
class CNN(torch.nn.Module):
def __init__(self, params, gat = None):
super(CNN, self).__init__()
self.bert = BertModel.from_pretrained('bert-base-uncased')
self.conv_layers = nn.ModuleList()
self.pool_layers = nn.ModuleList()
poolingLayer_out_size = 0
self.dropout = params['cnn_dropout']
self.filter_size = params['cnn_filter_sizes']
if bool(self.dropout[0]) :
self.drp1 = nn.Dropout(p = self.dropout[0])
if bool(self.dropout[1]) :
self.drp2 = nn.Dropout(p = self.dropout[1])
for fsz in self.filter_size :
l_conv = nn.Conv1d(params['embedding_dim'], params['cnn_out_channels'], fsz, stride = params['cnn_conv_stride'])
torch.nn.init.xavier_uniform_(l_conv.weight)
l_pool = nn.MaxPool1d(params['cnn_pool_stride'], stride = params['cnn_pool_stride'])
l_out_size = out_size(params['sequence_length'], fsz, stride = params['cnn_conv_stride'])
pool_out_size = int(l_out_size * params['cnn_out_channels'] / params['cnn_pool_stride'])
poolingLayer_out_size += pool_out_size
self.conv_layers.append(l_conv)
self.pool_layers.append(l_pool)
self.linear1 = nn.Linear(poolingLayer_out_size, params['cnn_hidden_dim1'])
self.linear2 = nn.Linear(params['cnn_hidden_dim1'], params['classes'])
torch.nn.init.xavier_uniform_(self.linear1.weight)
torch.nn.init.xavier_uniform_(self.linear2.weight)
def forward(self, texts):
texts = self.bert(texts)[0].detach_()
texts = texts.permute(0, 2, 1)
if bool(self.dropout[0]):
texts = self.drp1(texts)
conv_out = []
for i in range(len(self.filter_size)) :
outputs = self.conv_layers[i](texts)
outputs = outputs.view(outputs.shape[0], 1, outputs.shape[1] * outputs.shape[2])
outputs = self.pool_layers[i](outputs)
outputs = nn.functional.relu(outputs)
outputs = outputs.view(outputs.shape[0], -1)
conv_out.append(outputs)
del outputs
if len(self.filter_size) > 1 :
outputs = torch.cat(conv_out, 1)
else:
outputs = conv_out[0]
outputs = self.linear1(outputs)
outputs = nn.functional.relu(outputs)
if bool(self.dropout[1]) :
outputs = self.drp2(outputs)
outputs = self.linear2(outputs)
return outputs
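# Hedged usage sketch (parameter keys match the lookups above; the values are illustrative
# assumptions, not taken from the original configuration):
# params = {'embedding_dim': 768, 'sequence_length': 128, 'cnn_out_channels': 100,
#           'cnn_filter_sizes': [3, 4, 5], 'cnn_conv_stride': 1, 'cnn_pool_stride': 2,
#           'cnn_dropout': [0.1, 0.5], 'cnn_hidden_dim1': 256, 'classes': 10}
# model = CNN(params)
# logits = model(input_ids)  # input_ids: LongTensor of shape (batch, sequence_length)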
|
nilq/baby-python
|
python
|
from pi import KafkaProducerClient
class LogProducer(object):
# TODO: Implement parallel processing
producer = KafkaProducerClient()
for idx in range(1000):
data = {
"res": {
"body": {
"success": False,
"code": "INTERNAL_SERVER_ERROR",
"message": "There is an error trying to process your transaction at the moment. Please try again in a while.",
"data": {}
},
"_headers": {
"set-cookie": "id-mercury=; Path=/apis/v1; Expires=Thu, 01 Jan 1970 00:00:00 GMT",
"x-accel-buffering": "no",
"access-control-allow-headers": "undefined",
"access-control-allow-credentials": "true",
"access-control-expose-headers": "id-mercury",
"x-server-timestamp": "1559037314590",
"content-type": "application/json; charset=utf-8",
"content-length": "167",
"etag": "W/\"a7-e+mYDAtUpp7U59+za+6pr7UE294\"",
"x-response-time": "97.723ms"
}
},
"req": {
"body": {
"merchantId": "MORESUPERMARKET",
"transactionId": "12781910260852152512",
"merchantOrderId": "1278-1910260852",
"amount": 28208,
"instrumentType": "MOBILE",
"instrumentReference": "9154548181",
"message": "Collect for Order Id:1278-1910260852",
"email": "",
"expiresIn": 180,
"shortName": "",
"subMerchant": "",
"storeId": "1278",
"terminalId": "J1910"
},
"headers": {
"host": "mercury.traefik.prod.phonepe.com",
"user-agent": "Go-http-client/1.1",
"content-length": "454",
"content-type": "application/json",
"x-client-ip": "103.39.0.112",
"x-forwarded-by": "103.243.35.246:443",
"x-forwarded-for": "103.39.0.112, 10.85.22.27",
"x-forwarded-host": "mercury.traefik.prod.phonepe.com",
"x-forwarded-port": "80",
"x-forwarded-proto": "http",
"x-forwarded-server": "prd-traefik101",
"x-real-ip": "10.85.22.27",
"x-verify": "1ca27036776dbb3d41316e13b82b046e50d8bf3d9d2e96ebc473076f8ab18d11",
"accept-encoding": "gzip",
"authorization": "Bearer eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzUxMiJ9.eyJpc3MiOiJwaG9uZXBlLWFwaSIsImV4cCI6MzMwODcxODQ2MzgsImlhdCI6MTUzMDI3NTgzOCwic3ViIjoiTU9SRVNVUEVSTUFSS0VUIiwicm9sZSI6Im1lcmNoYW50IiwidHlwZSI6InN0YXRpYyJ9.106JWEJDuEKEpb0VodD_F5JTbjUoi6O8JHGWz0T4N2CE9gm4_MIoJnq69J5MB0ZEqpNtD-XcwNl6m2Va5IKjFA",
"x-salt-index": "1",
"x-auth-mode": "dummy"
}
},
"responseTime": 98
}
# producer.send_message(data=data)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
import os, sys
from typing import Union, List
import pprint
pp = pprint.PrettyPrinter(indent=4, stream=sys.stderr)
from google.protobuf.compiler import plugin_pb2 as plugin
from google.protobuf.descriptor_pool import DescriptorPool
from google.protobuf.descriptor import Descriptor, FieldDescriptor, FileDescriptor
from gen_decoder import gen_decoder_section
from gen_encoder import gen_encoder_section
import gen_util as util
import gen_sol_constants as sol_constants
import solidity_protobuf_extensions_pb2 as solpbext
def gen_fields(msg: Descriptor) -> str:
return '\n'.join(map((lambda f: (" {type} {name};").format(type = util.gen_fieldtype(f), name = f.name)), msg.fields))
def gen_map_fields_decl_for_field(f: FieldDescriptor) -> str:
return (sol_constants.MAP_FIELD_DEFINITION).format(
name = f.name,
key_type = util.gen_global_type_name_from_field(f.message_type.fields[0]),
container_type = util.gen_global_type_name_from_field(f)
)
def gen_map_fields(msg: Descriptor) -> str:
map_fields = list(filter(lambda f: f.message_type and f.message_type.GetOptions().map_entry, msg.fields))
return '\n'.join(map(gen_map_fields_decl_for_field, map_fields))
# below gen_* codes for generating external library
def gen_struct_definition(msg: Descriptor) -> str:
"""Generates the following part.
struct Data {
...
}
"""
map_fields = gen_map_fields(msg)
if map_fields.strip():
map_fields = "\n //non serialized fields" + map_fields
else:
map_fields = ""
fields = gen_fields(msg)
if (fields or map_fields):
return (sol_constants.STRUCT_DEFINITION).format(
fields = fields,
map_fields = map_fields
)
else:
return (sol_constants.STRUCT_DEFINITION).format(
fields = " bool x;",
map_fields = map_fields
)
def gen_enums(msg: Union[Descriptor, FileDescriptor]) -> str:
return '\n'.join(map(util.gen_enumtype, msg.enum_types_by_name.values()))
# below gen_* codes for generating internal library
def gen_enum_definition(msg: Union[Descriptor, FileDescriptor]) -> str:
"""Generates the following parts.
enum Foo { ... }
function encode_Foo(...) { ... }
function decode_Foo(...) { ... }
enum Bar { ... }
function encode_Bar(...) { ... }
function decode_Bar(...) { ... }
...
"""
enums = gen_enums(msg)
if enums.strip():
return (sol_constants.ENUMS_DEFINITION).format(
enums = gen_enums(msg)
)
else:
return ""
# below gen_* codes for generating internal library
def gen_utility_functions(msg: Descriptor) -> str:
return (sol_constants.UTILITY_FUNCTION).format(
name = util.gen_internal_struct_name(msg)
)
def gen_map_insert_on_store(f: FieldDescriptor, parent_msg: Descriptor) -> str:
for nt in parent_msg.nested_types:
if nt.GetOptions().map_entry:
if f.message_type and f.message_type is nt:
return ('output._size_{name} = input._size_{name};\n').format(name = f.name)
return ''
def gen_store_code_for_field(f: FieldDescriptor, msg: Descriptor) -> str:
tmpl = ""
if util.field_is_message(f) and util.field_is_repeated(f):
tmpl = sol_constants.STORE_REPEATED
elif util.field_is_message(f):
tmpl = sol_constants.STORE_MESSAGE
else:
return (sol_constants.STORE_OTHER).format(
field = f.name
)
libname = util.gen_struct_codec_lib_name_from_field(f)
return tmpl.format(
i = f.number,
field = f.name,
lib = libname,
map_insert_code = gen_map_insert_on_store(f, msg)
)
def gen_store_codes(msg: Descriptor) -> str:
return ''.join(map((lambda f: gen_store_code_for_field(f, msg)), msg.fields))
def gen_store_function(msg: Descriptor) -> str:
"""Generates the following.
function store(Data memory input, Data storage output) internal {
...
}
"""
return (sol_constants.STORE_FUNCTION).format(
name = util.gen_internal_struct_name(msg),
store_codes = gen_store_codes(msg)
)
def gen_value_copy_code(value_field, dst_fragment):
if util.field_is_message(value_field):
return ("{struct_name}.store(value, {dst}.value);").format(
struct_name = util.gen_struct_codec_lib_name_from_field(value_field),
dst = dst_fragment
)
else:
return ("{dst}.value = value;").format(dst = dst_fragment)
def gen_map_helper_codes_for_field(f: FieldDescriptor, nested_type: Descriptor) -> str:
kf = nested_type.fields[0]
vf = nested_type.fields[1]
key_type = util.gen_global_type_name_from_field(kf)
value_type = util.gen_global_type_name_from_field(vf)
field_type = util.gen_global_type_name_from_field(f)
if util.is_complex_type(value_type):
value_storage_type = "memory"
else:
value_storage_type = ""
return (sol_constants.MAP_HELPER_CODE).format(
name = util.to_camel_case(f.name),
val_name = "self.{0}".format(f.name),
map_name = "self._size_{0}".format(f.name),
key_type = key_type,
value_type = value_type,
field_type = field_type,
value_storage_type = value_storage_type,
key_storage_type = "memory" if util.is_complex_type(key_type) else "",
container_type = util.gen_global_type_name_from_field(f)
)
def gen_array_helper_codes_for_field(f: FieldDescriptor) -> str:
field_type = util.gen_global_type_name_from_field(f)
return (sol_constants.ARRAY_HELPER_CODE).format(
name = util.to_camel_case(f.name),
val_name = "self.{0}".format(f.name),
field_type = field_type,
field_storage_type = "memory" if util.is_complex_type(field_type) else ""
)
def gen_map_helper(nested_type: Descriptor, parent_msg: Descriptor, all_map_fields: List[FieldDescriptor]) -> str:
if nested_type.GetOptions().map_entry:
map_fields = list(filter(
lambda f: f.message_type and f.message_type is nested_type,
parent_msg.fields))
all_map_fields.extend(map_fields)
return ''.join(map(lambda f: gen_map_helper_codes_for_field(f, nested_type), map_fields))
else:
return ''
def gen_map_helpers(msg: Descriptor, all_map_fields: List[FieldDescriptor]) -> str:
return ''.join(map((lambda nt: gen_map_helper(nt, msg, all_map_fields)), msg.nested_types))
def gen_array_helpers(msg: Descriptor, all_map_fields: List[FieldDescriptor]) -> str:
array_fields = filter(lambda t: util.field_is_repeated(t) and t not in all_map_fields, msg.fields)
return ''.join(map(lambda f: gen_array_helper_codes_for_field(f), array_fields))
def gen_codec(msg: Descriptor, delegate_codecs: List[str]):
delegate_lib_name = util.gen_delegate_lib_name(msg)
all_map_fields = []
# delegate codec
delegate_codecs.append(sol_constants.CODECS.format(
delegate_lib_name = delegate_lib_name,
enum_definition = gen_enum_definition(msg),
struct_definition = gen_struct_definition(msg),
decoder_section = gen_decoder_section(msg),
encoder_section = gen_encoder_section(msg),
store_function = gen_store_function(msg),
map_helper = gen_map_helpers(msg, all_map_fields),
array_helper = gen_array_helpers(msg, all_map_fields),
utility_functions = gen_utility_functions(msg)
))
for nested in msg.nested_types:
nested = nested if not util.ALLOW_RESERVED_KEYWORDS else util.MessageWrapper(nested)
gen_codec(nested, delegate_codecs)
def gen_global_enum(file: FileDescriptor, delegate_codecs: List[str]):
"""Generates the following parts.
library FILE_NAME_GLOBAL_ENUMS {
enum Foo { ... }
function encode_Foo(...) { ... }
function decode_Foo(...) { ... }
enum Bar { ... }
function encode_Bar(...) { ... }
function decode_Bar(...) { ... }
...
}
"""
delegate_codecs.append(sol_constants.GLOBAL_ENUM_CODECS.format(
delegate_lib_name = util.gen_global_enum_name(file),
enum_definition = gen_enum_definition(file),
))
RUNTIME_FILE_NAME = "ProtoBufRuntime.sol"
PROTOBUF_ANY_FILE_NAME = "GoogleProtobufAny.sol"
GEN_RUNTIME = False
COMPILE_META_SCHEMA = False
def apply_options(params_string):
global GEN_RUNTIME
params = util.parse_urllike_parameter(params_string)
if 'gen_runtime' in params and 'use_runtime' in params:
raise ValueError('"gen_runtime" and "use_runtime" cannot be used together')
if "gen_runtime" in params:
GEN_RUNTIME = True
change_runtime_file_names(params["gen_runtime"])
if "use_runtime" in params:
GEN_RUNTIME = False
change_runtime_file_names(params["use_runtime"])
if "ignore_protos" in params:
util.set_ignored_protos(params["ignore_protos"])
if "pb_libname" in params:
util.change_pb_libname_prefix(params["pb_libname"])
if "for_linking" in params:
sys.stderr.write("warning: for_linking option is still under experiment due to slow-pace of solidity development\n")
util.set_library_linking_mode()
if "gen_internal_lib" in params:
util.set_internal_linking_mode()
if "use_builtin_enum" in params:
sys.stderr.write("warning: use_builtin_enum option is still under experiment because we cannot set value to solidity's enum\n")
util.set_enum_as_constant(True)
if "compile_meta_schema" in params:
global COMPILE_META_SCHEMA
COMPILE_META_SCHEMA = True
if "solc_version" in params:
util.set_solc_version(params["solc_version"])
if "allow_reserved_keywords" in params:
util.set_allow_reserved_keywords(True)
def change_runtime_file_names(name: str):
if not name.endswith(".sol"):
raise ValueError('Only *.sol file is acceptable, but {0} is specified'.format(name))
global RUNTIME_FILE_NAME, PROTOBUF_ANY_FILE_NAME
RUNTIME_FILE_NAME = name
# GoogleProtobufAny.sol and ProtoBufRuntime.sol must be put together in the same directory
PROTOBUF_ANY_FILE_NAME = os.path.join(
os.path.dirname(RUNTIME_FILE_NAME),
os.path.basename(PROTOBUF_ANY_FILE_NAME))
def gen_output_path(dependency: FileDescriptor) -> str:
dirname = os.path.dirname(dependency.name)
basename = os.path.basename(dependency.name).replace('.proto', '.sol')
if dependency.GetOptions().HasExtension(solpbext.file_options):
opts = dependency.GetOptions().Extensions[solpbext.file_options]
if opts.dirpath:
dirname = opts.dirpath
if dirname:
return '{0}/{1}'.format(dirname, basename)
else:
return '{0}'.format(basename)
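# Illustrative example (assuming no solidity file_options extension is set): a dependency
# named "msgs/foo.proto" maps to "msgs/foo.sol"; if opts.dirpath were set to "gen", the
# result would be "gen/foo.sol" instead.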
def gen_relative_import_path(target: str, start: str) -> str:
target = os.path.join('root', target)
start = os.path.join('root', start)
d = os.path.relpath(os.path.dirname(target), os.path.dirname(start))
if not d.startswith('.'):
d = os.path.join('.', d)
return os.path.join(d, os.path.basename(target))
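# Illustrative example: gen_relative_import_path("runtime/ProtoBufRuntime.sol", "msgs/Foo.sol")
# yields "../runtime/ProtoBufRuntime.sol"; two files in the same directory yield a
# "./"-prefixed path such as "./ProtoBufRuntime.sol".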
def generate_code(request, response):
pool = DescriptorPool()
for f in request.proto_file:
pool.Add(f)
generated = 0
apply_options(request.parameter)
for proto_file in map(lambda f: pool.FindFileByName(f.name), request.proto_file):
# skip google.protobuf namespace
if (proto_file.package == "google.protobuf") and (not COMPILE_META_SCHEMA):
continue
# skip native solidity type definition
if proto_file.package == "solidity":
continue
# skip descriptors listed by ignored_protos
if util.ignores_proto(proto_file.name):
continue
# main output
output = []
output_path = gen_output_path(proto_file)
# generate sol library
# prologue
output.append('// SPDX-License-Identifier: Apache-2.0\npragma solidity ^{0};'.format(util.SOLIDITY_VERSION))
for pragma in util.SOLIDITY_PRAGMAS:
output.append('{0};'.format(pragma))
if GEN_RUNTIME:
output.append('import "{0}";'.format(gen_relative_import_path(RUNTIME_FILE_NAME, output_path)))
output.append('import "{0}";'.format(gen_relative_import_path(PROTOBUF_ANY_FILE_NAME, output_path)))
else:
output.append('import "{0}";'.format(RUNTIME_FILE_NAME))
output.append('import "{0}";'.format(PROTOBUF_ANY_FILE_NAME))
for dep in proto_file.dependencies:
if dep.package == "solidity":
continue
if (dep.package == "google.protobuf") and (not COMPILE_META_SCHEMA):
continue
if util.ignores_proto(dep.name):
continue
dep_output_path = gen_output_path(dep)
output.append('import "{0}";'.format(gen_relative_import_path(dep_output_path, output_path)))
# generate per message codes
delegate_codecs = []
for msg in proto_file.message_types_by_name.values():
msg = msg if not util.ALLOW_RESERVED_KEYWORDS else util.MessageWrapper(msg)
gen_codec(msg, delegate_codecs)
if len(proto_file.enum_types_by_name):
gen_global_enum(proto_file, delegate_codecs)
# epilogue
output = output + delegate_codecs
if len(delegate_codecs) > 0: # if it has any contents, output pb.sol file
# Fill response
f = response.file.add()
f.name = output_path
f.content = '\n'.join(output)
# increase generated file count
generated = generated + 1
if generated > 0 and GEN_RUNTIME:
try:
with open(os.path.dirname(os.path.realpath(__file__)) + '/runtime/ProtoBufRuntime.sol', 'r') as runtime:
rf = response.file.add()
rf.name = RUNTIME_FILE_NAME
rf.content = '// SPDX-License-Identifier: Apache-2.0\npragma solidity ^{0};\n'.format(util.SOLIDITY_VERSION) + runtime.read()
except Exception as e:
sys.stderr.write(
"required to generate solidity runtime at {} but cannot open runtime with error {}\n".format(
RUNTIME_FILE_NAME, e
)
)
try:
with open(os.path.dirname(os.path.realpath(__file__)) + '/runtime/GoogleProtobufAny.sol', 'r') as runtime:
rf = response.file.add()
rf.name = PROTOBUF_ANY_FILE_NAME
rf.content = '// SPDX-License-Identifier: Apache-2.0\npragma solidity ^{0};\n'.format(util.SOLIDITY_VERSION) + runtime.read()
except Exception as e:
sys.stderr.write(
"required to generate solidity runtime at {} but cannot open runtime with error {}\n".format(
PROTOBUF_ANY_FILE_NAME, e
)
)
if __name__ == '__main__':
# Read request message from stdin
if hasattr(sys.stdin, 'buffer'):
data = sys.stdin.buffer.read()
else:
data = sys.stdin.read()
# Parse request
request = plugin.CodeGeneratorRequest()
request.ParseFromString(data)
# pp.pprint(request)
# Create response
response = plugin.CodeGeneratorResponse()
# Generate code
generate_code(request, response)
# Serialise response message
output = response.SerializeToString()
# Write to stdout
if hasattr(sys.stdin, 'buffer'):
sys.stdout.buffer.write(output)
else:
sys.stdout.write(output)
|
nilq/baby-python
|
python
|
import json
import os
import toml
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
CONFIG_FILE = 'config.toml'
CONFIG_FILE_DIR = os.path.join(BASE_DIR, CONFIG_FILE)
CONFIG_DATA = toml.load(CONFIG_FILE_DIR)
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = os.environ['SECRET_KEY']
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = CONFIG_DATA['settings']['DEBUG']
ALLOWED_HOSTS = CONFIG_DATA['settings']['ALLOWED_HOSTS']
# Application definition
INSTALLED_APPS = [
'modeltranslation',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'ckeditor',
'ckeditor_uploader',
'mptt',
'snowpenguin.django.recaptcha3',
'debug_toolbar',
'settings.apps.SettingsConfig',
'users.apps.UsersConfig',
'news.apps.NewsConfig',
'email_notification.apps.EmailSendConfig',
'comments.apps.CommentsConfig',
]
AUTH_USER_MODEL = 'users.UserModel'
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.locale.LocaleMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'debug_toolbar.middleware.DebugToolbarMiddleware',
'django_currentuser.middleware.ThreadLocalUserMiddleware',
]
DOMAIN_URL = CONFIG_DATA['settings']['DOMAIN_URL']
INTERNAL_IPS = CONFIG_DATA['settings']['INTERNAL_IPS']
ROOT_URLCONF = 'django_news.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'django_news.wsgi.application'
# REDIS & CELERY
REDIS_HOST = os.environ['REDIS_HOST']
REDIS_PORT = os.environ['REDIS_PORT']
REDIS_URL = 'redis://' + REDIS_HOST + ':' + REDIS_PORT + '/0'
CELERY_BROKER_URL = REDIS_URL
# Database
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
CACHES = {
'default': {
'BACKEND': 'django_redis.cache.RedisCache',
'LOCATION': REDIS_URL,
'OPTIONS': {
'CLIENT_CLASS': 'django_redis.client.DefaultClient',
}
}
}
# Password validation
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
gettext = lambda s: s
LANGUAGES = (
('ru', gettext('Russian')),
('en', gettext('English')),
)
LOCALE_PATHS = (os.path.join(BASE_DIR, 'locale'),)
MODELTRANSLATION_DEFAULT_LANGUAGE = 'en'
CKEDITOR_UPLOAD_PATH = 'uploads/'
CKEDITOR_CONFIGS = {
'default': {
'skin': 'moono-lisa',
'toolbar_Basic': [
['Source', '-', 'Bold', 'Italic']
],
'toolbar_YourCustomToolbarConfig': [
{'name': 'document', 'items': ['Source', '-', 'Save', 'NewPage', 'Preview', 'Print', '-', 'Templates']},
{'name': 'clipboard', 'items': ['Cut', 'Copy', 'Paste', 'PasteText', 'PasteFromWord', '-', 'Undo', 'Redo']},
{'name': 'editing', 'items': ['Find', 'Replace', '-', 'SelectAll']},
{'name': 'forms',
'items': ['Form', 'Checkbox', 'Radio', 'TextField', 'Textarea', 'Select', 'Button', 'ImageButton',
'HiddenField']},
'/',
{'name': 'basicstyles',
'items': ['Bold', 'Italic', 'Underline', 'Strike', 'Subscript', 'Superscript', '-', 'RemoveFormat']},
{'name': 'paragraph',
'items': ['NumberedList', 'BulletedList', '-', 'Outdent', 'Indent', '-', 'Blockquote', 'CreateDiv', '-',
'JustifyLeft', 'JustifyCenter', 'JustifyRight', 'JustifyBlock', '-', 'BidiLtr', 'BidiRtl',
'Language']},
{'name': 'links', 'items': ['Link', 'Unlink', 'Anchor']},
{'name': 'insert',
'items': ['Image', 'Flash', 'Table', 'HorizontalRule', 'Smiley', 'SpecialChar', 'PageBreak', 'Iframe']},
'/',
{'name': 'styles', 'items': ['Styles', 'Format', 'Font', 'FontSize']},
{'name': 'colors', 'items': ['TextColor', 'BGColor']},
{'name': 'tools', 'items': ['Maximize', 'ShowBlocks']},
{'name': 'about', 'items': ['About']},
'/', # put this to force next toolbar on new line
{'name': 'yourcustomtools', 'items': [
# put the name of your editor.ui.addButton here
'Preview',
'Maximize',
]},
],
'toolbar': 'YourCustomToolbarConfig', # put selected toolbar config here
'tabSpaces': 4,
'extraPlugins': ','.join([
'uploadimage', # the upload image feature
# your extra plugins here
'div',
'autolink',
'autoembed',
'embedsemantic',
'autogrow',
# 'devtools',
'widget',
'lineutils',
'clipboard',
'dialog',
'dialogui',
'elementspath'
]),
}
}
# STATIC & MEDIA
STATIC_URL = '/static/'
STATIC_ROOT = os.path.join(BASE_DIR, 'static')
STATICFILES_DIRS = [os.path.join(BASE_DIR, 'news/static')]
MEDIA_URL = '/media/'
MEDIA_ROOT = os.path.join(BASE_DIR, 'media')
# SMTP
EMAIL_HOST = CONFIG_DATA['smtp']['EMAIL_HOST']
EMAIL_USE_TLS = CONFIG_DATA['smtp']['EMAIL_USE_TLS']
EMAIL_USE_SSL = CONFIG_DATA['smtp']['EMAIL_USE_SSL']
EMAIL_PORT = CONFIG_DATA['smtp']['EMAIL_PORT']
EMAIL_HOST_USER = os.getenv('EMAIL_HOST_USER')
EMAIL_HOST_PASSWORD = os.getenv('EMAIL_HOST_PASSWORD')
# RECAPTCHA
RECAPTCHA_PUBLIC_KEY = os.getenv('RECAPTCHA_PUBLIC_KEY')
RECAPTCHA_PRIVATE_KEY = os.getenv('RECAPTCHA_PRIVATE_KEY')
RECAPTCHA_DEFAULT_ACTION = 'generic'
RECAPTCHA_SCORE_THRESHOLD = 0.5
SITE_ID = 1
|
nilq/baby-python
|
python
|
from location import GeoCoordinate, geo_to_cartesian
import time
class Value:
def __init__(self, value, unit):
self.value = value
self.unit = unit
class Measurement:
def __init__(self, row):
self.parameter = row["parameter"]
self.value = Value(row["value"], row["unit"])
self.location_geo = GeoCoordinate(row["latitude"], row["longitude"])
self.location = self.location_geo
self.source = row["source"]
self.time = time.strptime(row['date'], "%Y-%m-%dT%H:%M:%S.%fZ")
self.confidence = row['confidence']
def convert_location_to_cartesian(self):
self.location_cart = geo_to_cartesian(self.location_geo)
self.location = self.location_cart
def convert_location_to_geo(self):
self.location = self.location_geo
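# Hedged usage sketch (row values are illustrative assumptions, e.g. from csv.DictReader):
# row = {"parameter": "pm25", "value": 12.0, "unit": "ug/m3",
#        "latitude": 52.5, "longitude": 13.4, "source": "sensor-1",
#        "date": "2020-01-01T00:00:00.000Z", "confidence": 0.9}
# m = Measurement(row)
# m.convert_location_to_cartesian()  # m.location now refers to Cartesian coordinates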
|
nilq/baby-python
|
python
|
"""
LC89. Gray Code
The gray code is a binary numeral system where two successive values differ in only one bit.
Given a non-negative integer n representing the total number of bits in the code, print the sequence of gray code. A gray code sequence must begin with 0.
Example 1:
Input: 2
Output: [0,1,3,2]
Explanation:
00 - 0
01 - 1
11 - 3
10 - 2
For a given n, a gray code sequence may not be uniquely defined.
For example, [0,2,3,1] is also a valid gray code sequence.
00 - 0
10 - 2
11 - 3
01 - 1
Example 2:
Input: 0
Output: [0]
Explanation: We define the gray code sequence to begin with 0.
A gray code sequence of n has size = 2^n, which for n = 0 the size is 2^0 = 1.
Therefore, for n = 0 the gray code sequence is [0].
"""
# Runtime: 40 ms, faster than 22.57% of Python3 online submissions for Gray Code.
# Memory Usage: 14.7 MB, less than 5.26% of Python3 online submissions for Gray Code.
from typing import List
class Solution:
def grayCode(self, n: int) -> List[int]:
if n == 0:
return [0]
res = {}
curr = "0" * n
self.dfs(res, curr, n, 0)
return [int(key, 2) for key,_ in sorted(res.items(), key=lambda x:x[1])]
def dfs(self, res, curr, n, index):
res[curr] = index
for i in range(n):
if curr[i] == "0":
tmp = curr[:i] + "1" + curr[i+1:]
else:
tmp = curr[:i] + "0" + curr[i+1:]
if tmp in res:
continue
self.dfs(res, tmp, n, index+1)
break
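# Hedged alternative sketch (not part of the submitted solution above): the i-th
# binary-reflected Gray code equals i ^ (i >> 1), so the sequence for n bits can be
# generated directly. The function name below is illustrative.
def gray_code_direct(n):
    return [i ^ (i >> 1) for i in range(1 << n)]
# gray_code_direct(2) == [0, 1, 3, 2], matching Example 1 in the docstring.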
|
nilq/baby-python
|
python
|
"""The IPython HTML Notebook"""
import os
# Packagers: modify this line if you store the notebook static files elsewhere
DEFAULT_STATIC_FILES_PATH = os.path.join(os.path.dirname(__file__), "static")
del os
from .nbextensions import install_nbextension
|
nilq/baby-python
|
python
|
import unittest
import pytest
from anchore_engine.db import Image, get_thread_scoped_session
from anchore_engine.services.policy_engine.engine.tasks import ImageLoadTask
from anchore_engine.services.policy_engine.engine.policy.gate import ExecutionContext
from anchore_engine.services.policy_engine import _init_distro_mappings
from test.integration.services.policy_engine.fixtures import cls_test_data_env2, cls_anchore_db
from anchore_engine.subsys import logger
@pytest.fixture(scope='class')
def cls_fully_loaded_test_env(cls_test_data_env2, request):
"""
Load the test env, including a feed sync and image analysis. Places the env in the class's test_env and test_image vars
:param cls_test_data_env:
:param request:
:return:
"""
_init_distro_mappings()
from anchore_engine.services.policy_engine.engine.tasks import FeedsUpdateTask
t = FeedsUpdateTask()
t.execute()
for image_id, path in request.cls.test_env.image_exports():
logger.info(('Ensuring loaded: image id: {} from file: {}'.format(image_id, path)))
t = ImageLoadTask(image_id=image_id, user_id='0', url='file://' + path)
t.execute()
db = get_thread_scoped_session()
test_image = db.query(Image).get((request.cls.test_env.get_images_named(request.cls.__default_image__)[0][0], '0'))
request.cls.test_image = test_image
db.rollback()
@pytest.fixture(scope='class')
def cls_no_feeds_test_env(cls_test_data_env2, request):
"""
Same as fully_loaded_test_env but does not sync feeds
:param cls_test_data_env:
:param request:
:return:
"""
_init_distro_mappings()
for image_id, path in request.cls.test_env.image_exports():
logger.info(('Ensuring loaded: image id: {} from file: {}'.format(image_id, path)))
t = ImageLoadTask(image_id=image_id, user_id='0', url='file://' + path)
t.execute()
db = get_thread_scoped_session()
test_image = db.query(Image).get((request.cls.test_env.get_images_named(request.cls.__default_image__)[0][0], '0'))
request.cls.test_image = test_image
db.rollback()
class GateUnitTest(unittest.TestCase):
__default_image__ = 'node'
gate_clazz = None
def get_initialized_trigger(self, trigger_name, config=None, **kwargs):
clazz = self.gate_clazz.get_trigger_named(trigger_name)
trigger = clazz(self.gate_clazz, **kwargs)
context = ExecutionContext(db_session=get_thread_scoped_session(), configuration=config)
gate = trigger.gate_cls()
return trigger, gate, context
|
nilq/baby-python
|
python
|
#!/usr/bin/python
import csv
import pycurl
import json
def insertToElasticSearch(data):
esData={'year' : data[0],
'week': data[1],
'state' : data[2],
'area': data[3],
'location' : data[4],
'totalCase' : data[5],
'durationInDays': data[6],
'geo' : {
'lat' : data[7],
'lon' : data[8],
}
}
# 1 to 4 inclusive
#server = str(random.randrange(1,5))
server = "localhost"
c = pycurl.Curl()
url = 'http://' + server + ':9200/govmy/dengue/?pretty'
c.setopt(c.URL, url)
c.setopt(c.POSTFIELDS, json.dumps(esData))
c.perform()
with open('lokalitihotspot2015.csv', 'rb') as csvfile:
#with open('test.csv', 'rb') as csvfile:
propreader = csv.reader(csvfile)
next(csvfile)
for row in propreader:
insertToElasticSearch(row)
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
networking_calico.plugins.ml2.drivers.calico.test.lib
~~~~~~~~~~~
Common code for Neutron driver UT.
"""
import eventlet
import eventlet.queue
import inspect
import logging
import mock
import sys
# When you're working on a test and need to see logging - both from the test
# code and the code _under_ test - uncomment the following line.
#
# logging.basicConfig(level=logging.DEBUG)
_log = logging.getLogger(__name__)
sys.modules['etcd'] = m_etcd = mock.MagicMock()
sys.modules['neutron'] = m_neutron = mock.MagicMock()
sys.modules['neutron.agent'] = m_neutron.agent
sys.modules['neutron.agent.rpc'] = m_neutron.agent.rpc
sys.modules['neutron.common'] = m_neutron.common
sys.modules['neutron.common.exceptions'] = m_neutron.common.exceptions
sys.modules['neutron.db'] = m_neutron.db
sys.modules['neutron.db.models'] = m_neutron.db.models
sys.modules['neutron.db.models.l3'] = m_neutron.db.models.l3
sys.modules['neutron.openstack'] = m_neutron.openstack
sys.modules['neutron.openstack.common'] = m_neutron.openstack.common
sys.modules['neutron.openstack.common.db'] = m_neutron.openstack.common.db
sys.modules['neutron.plugins'] = m_neutron.plugins
sys.modules['neutron.plugins.ml2'] = m_neutron.plugins.ml2
sys.modules['neutron.plugins.ml2.drivers'] = m_neutron.plugins.ml2.drivers
sys.modules['neutron.plugins.ml2.rpc'] = m_neutron.plugins.ml2.rpc
sys.modules['sqlalchemy'] = m_sqlalchemy = mock.Mock()
sys.modules['sqlalchemy.orm'] = m_sqlalchemy.orm
sys.modules['sqlalchemy.orm.exc'] = m_sqlalchemy.orm.exc
sys.modules['networking_calico.compat'] = m_compat = mock.MagicMock()
port1 = {'binding:vif_type': 'tap',
'binding:host_id': 'felix-host-1',
'id': 'DEADBEEF-1234-5678',
'network_id': 'calico-network-id',
'device_id': 'instance-1',
'device_owner': 'compute:nova',
'fixed_ips': [{'subnet_id': 'subnet-id-10.65.0--24',
'ip_address': '10.65.0.2'}],
'mac_address': '00:11:22:33:44:55',
'admin_state_up': True,
'security_groups': ['SGID-default'],
'status': 'ACTIVE'}
port2 = {'binding:vif_type': 'tap',
'binding:host_id': 'felix-host-1',
'id': 'FACEBEEF-1234-5678',
'network_id': 'calico-network-id',
'device_id': 'instance-2',
'device_owner': 'compute:nova',
'fixed_ips': [{'subnet_id': 'subnet-id-10.65.0--24',
'ip_address': '10.65.0.3'}],
'mac_address': '00:11:22:33:44:66',
'admin_state_up': True,
'security_groups': ['SGID-default'],
'status': 'ACTIVE'}
# Port with an IPv6 address.
port3 = {'binding:vif_type': 'tap',
'binding:host_id': 'felix-host-2',
'id': 'HELLO-1234-5678',
'network_id': 'calico-network-id',
'device_id': 'instance-3',
'device_owner': 'compute:nova',
'fixed_ips': [{'subnet_id': 'subnet-id-2001:db8:a41:2--64',
'ip_address': '2001:db8:a41:2::12'}],
'mac_address': '00:11:22:33:44:66',
'admin_state_up': True,
'security_groups': ['SGID-default'],
'status': 'ACTIVE'}
floating_ports = [{'fixed_port_id': 'DEADBEEF-1234-5678',
'fixed_ip_address': '10.65.0.2',
'floating_ip_address': '192.168.0.1'}]
class EtcdException(Exception):
pass
class EtcdKeyNotFound(EtcdException):
pass
class EtcdClusterIdChanged(EtcdException):
pass
class EtcdEventIndexCleared(EtcdException):
pass
class EtcdValueError(EtcdException):
pass
class EtcdDirNotEmpty(EtcdValueError):
pass
m_etcd.EtcdException = EtcdException
m_etcd.EtcdKeyNotFound = EtcdKeyNotFound
m_etcd.EtcdClusterIdChanged = EtcdClusterIdChanged
m_etcd.EtcdEventIndexCleared = EtcdEventIndexCleared
m_etcd.EtcdValueError = EtcdValueError
m_etcd.EtcdDirNotEmpty = EtcdDirNotEmpty
class DBError(Exception):
pass
m_compat.db_exc.DBError = DBError
class NoResultFound(Exception):
pass
m_sqlalchemy.orm.exc.NoResultFound = NoResultFound
# Define a stub class, that we will use as the base class for
# CalicoMechanismDriver.
class DriverBase(object):
def __init__(self, agent_type, vif_type, vif_details):
pass
# Define another stub class that mocks out leader election: assume we're always
# the leader. This is a fake elector: it never votes (get it!?).
class GrandDukeOfSalzburg(object):
def __init__(self, *args, **kwargs):
pass
def master(self):
return True
def stop(self):
pass
# Replace Neutron's SimpleAgentMechanismDriverBase - which is the base class
# that CalicoMechanismDriver inherits from - with this stub class.
m_neutron.plugins.ml2.drivers.mech_agent.SimpleAgentMechanismDriverBase = \
DriverBase
# Import all modules used by the mechanism driver so we can hook their logging.
from networking_calico import datamodel_v3
from networking_calico import etcdutils
from networking_calico import etcdv3
from networking_calico.plugins.ml2.drivers.calico import election
from networking_calico.plugins.ml2.drivers.calico import endpoints
from networking_calico.plugins.ml2.drivers.calico import mech_calico
from networking_calico.plugins.ml2.drivers.calico import policy
from networking_calico.plugins.ml2.drivers.calico import status
from networking_calico.plugins.ml2.drivers.calico import subnets
from networking_calico.plugins.ml2.drivers.calico import syncer
# Replace the elector.
mech_calico.Elector = GrandDukeOfSalzburg
REAL_EVENTLET_SLEEP_TIME = 0.01
# Value used to indicate 'timeout' in poll and sleep processing.
TIMEOUT_VALUE = object()
class Lib(object):
# Ports to return when the driver asks the OpenStack database for all
# current ports.
osdb_ports = []
# Subnets that the OpenStack database knows about.
osdb_subnets = []
def setUp(self):
# Announce the current test case.
_log.info("TEST CASE: %s", self.id())
# Mock calls to sys.exit.
self.sys_exit_p = mock.patch("sys.exit")
self.sys_exit_p.start()
# Hook eventlet.
self.setUp_eventlet()
# Hook logging.
self.setUp_logging()
# If an arg mismatch occurs, we want to see the complete diff of it.
self.maxDiff = None
# Create an instance of CalicoMechanismDriver.
mech_calico.mech_driver = None
self.driver = mech_calico.CalicoMechanismDriver()
# Hook the (mock) Neutron database.
self.db = mech_calico.plugin_dir.get_plugin()
self.db_context = mech_calico.ctx.get_admin_context()
self.db_context.session.query.return_value.filter_by.side_effect = (
self.port_query
)
# Arrange what the DB's get_ports will return.
self.db.get_ports.side_effect = self.get_ports
self.db.get_port.side_effect = self.get_port
# Arrange DB's get_subnet and get_subnets calls.
self.db.get_subnet.side_effect = self.get_subnet
self.db.get_subnets.side_effect = self.get_subnets
# Arrange what the DB's get_security_groups query will return (the
# default SG).
self.db.get_security_groups.return_value = [
{'id': 'SGID-default',
'security_group_rules': [
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv4',
'port_range_min': -1},
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv6',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'ethertype': 'IPv4',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'ethertype': 'IPv6',
'port_range_min': -1}
]}
]
self.db.get_security_group_rules.return_value = [
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv4',
'security_group_id': 'SGID-default',
'port_range_min': -1},
{'remote_group_id': 'SGID-default',
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'ingress',
'ethertype': 'IPv6',
'security_group_id': 'SGID-default',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'ethertype': 'IPv4',
'security_group_id': 'SGID-default',
'port_range_min': -1},
{'remote_group_id': None,
'remote_ip_prefix': None,
'protocol': -1,
'direction': 'egress',
'security_group_id': 'SGID-default',
'ethertype': 'IPv6',
'port_range_min': -1}
]
self.db._get_port_security_group_bindings.side_effect = (
self.get_port_security_group_bindings
)
self.port_security_group_bindings = [
{'port_id': 'DEADBEEF-1234-5678',
'security_group_id': 'SGID-default'},
{'port_id': 'FACEBEEF-1234-5678',
'security_group_id': 'SGID-default'},
{'port_id': 'HELLO-1234-5678',
'security_group_id': 'SGID-default'},
]
def setUp_eventlet(self):
"""setUp_eventlet
Setup to intercept sleep calls made by the code under test, and hence
to (i) control when those expire, and (ii) allow time to appear to pass
(to the code under test) without actually having to wait for that time.
"""
# Reset the simulated time (in seconds) that has passed since the
# beginning of the test.
self.current_time = 0
# Make time.time() return current_time.
self.old_time = sys.modules['time'].time
sys.modules['time'].time = lambda: self.current_time
# Reset the dict of current sleepers. In each dict entry, the key is
# an eventlet.Queue object and the value is the time at which the sleep
# should complete.
self.sleepers = {}
# Reset the list of spawned eventlet threads.
self.threads = []
# Replacement for eventlet.sleep: sleep for some simulated passage of
# time (as directed by simulated_time_advance), instead of for real
# elapsed time.
def simulated_time_sleep(secs=None):
if secs is None:
# Thread just wants to yield to any other waiting thread.
self.give_way()
return
# Create a new queue.
queue = eventlet.Queue(1)
queue.stack = inspect.stack()[1][3]
# Add it to the dict of sleepers, together with the waking up time.
self.sleepers[queue] = self.current_time + secs
_log.info("T=%s: %s: Start sleep for %ss until T=%s",
self.current_time,
queue.stack,
secs,
self.sleepers[queue])
# Do a zero time real sleep, to allow other threads to run.
self.real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
# Block until something is posted to the queue.
queue.get(True)
# Wake up.
return None
# Replacement for eventlet.spawn: track spawned threads so that we can
# kill them all when a test case ends.
def simulated_spawn(*args):
# Do the real spawn.
thread = self.real_eventlet_spawn(*args)
# Remember this thread.
self.threads.append(thread)
_log.info("New thread %s", thread)
# Also return it.
return thread
def simulated_spawn_after(secs, fn, *args):
def sleep_then_run():
simulated_time_sleep(secs)
fn(*args)
return simulated_spawn(sleep_then_run)
# Hook sleeping.
self.real_eventlet_sleep = eventlet.sleep
eventlet.sleep = simulated_time_sleep
# Similarly hook spawning.
self.real_eventlet_spawn = eventlet.spawn
eventlet.spawn = simulated_spawn
self.real_eventlet_spawn_after = eventlet.spawn_after
eventlet.spawn_after = simulated_spawn_after
def setUp_logging(self):
"""Setup to intercept and display logging by the code under test.
To see this logging, you also need to uncomment the logging.basicConfig
call near the top of this file.
"""
import logging
for module in [
election,
endpoints,
mech_calico,
policy,
status,
subnets,
syncer,
datamodel_v3,
etcdutils,
etcdv3,
]:
module.LOG = logging.getLogger("\t%-15s\t" %
module.__name__.split('.')[-1])
# Tear down after each test case.
def tearDown(self):
_log.info("Clean up remaining green threads...")
for thread in self.threads:
_log.info("Kill thread %s", thread)
thread.kill()
_log.info("All threads killed")
# Stop hooking eventlet.
self.tearDown_eventlet()
# Stop mocking sys.exit.
self.sys_exit_p.stop()
def tearDown_eventlet(self):
# Restore the real eventlet.sleep and eventlet.spawn.
eventlet.sleep = self.real_eventlet_sleep
eventlet.spawn = self.real_eventlet_spawn
eventlet.spawn_after = self.real_eventlet_spawn_after
# Repair time.time()
sys.modules['time'].time = self.old_time
# Method for the test code to call when it wants to advance the simulated
# time.
def simulated_time_advance(self, secs):
while (secs > 0):
_log.info("T=%s: Want to advance by %s", self.current_time, secs)
# Determine the time to advance to in this iteration: either the
# full time that we've been asked for, or the time at which the
# next sleeper should wake up, whichever of those is earlier.
wake_up_time = self.current_time + secs
for queue in self.sleepers.keys():
if self.sleepers[queue] < wake_up_time:
# This sleeper will wake up before the time that we've been
# asked to advance to.
wake_up_time = self.sleepers[queue]
# Advance to the determined time.
secs -= (wake_up_time - self.current_time)
self.current_time = wake_up_time
_log.info("T=%s", self.current_time)
# Wake up all sleepers that should now wake up.
for queue in self.sleepers.keys():
if self.sleepers[queue] <= self.current_time:
_log.info("T=%s >= %s: %s: Wake up!",
self.current_time,
self.sleepers[queue],
queue.stack)
del self.sleepers[queue]
queue.put_nowait(TIMEOUT_VALUE)
# Allow woken (and possibly other) threads to run.
self.real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
def give_way(self):
"""give_way
Method for test code to call when it wants to allow other eventlet
threads to run.
"""
self.real_eventlet_sleep(REAL_EVENTLET_SLEEP_TIME)
def check_update_port_status_called(self, context):
self.db.update_port_status.assert_called_once_with(
context._plugin_context,
context._port['id'],
mech_calico.constants.PORT_STATUS_ACTIVE)
self.db.update_port_status.reset_mock()
def get_port(self, context, port_id):
return self.get_ports(context, filters={'id': [port_id]})[0]
def get_ports(self, context, filters=None):
if filters is None:
return self.osdb_ports
assert filters.keys() == ['id']
allowed_ids = set(filters['id'])
return [p for p in self.osdb_ports if p['id'] in allowed_ids]
def get_subnet(self, context, id):
matches = [s for s in self.osdb_subnets if s['id'] == id]
if matches and len(matches) == 1:
return matches[0]
elif ':' in id:
return {'gateway_ip': '2001:db8:a41:2::1'}
else:
return {'gateway_ip': '10.65.0.1'}
def get_subnets(self, context, filters=None):
if filters:
self.assertTrue('id' in filters)
matches = [s for s in self.osdb_subnets
if s['id'] in filters['id']]
else:
matches = [s for s in self.osdb_subnets]
return matches
def notify_security_group_update(self, id, rules, port, type):
"""Notify a new or changed security group definition."""
# Prep appropriate responses for next get_security_group and
# _get_port_security_group_bindings calls.
self.db.get_security_group.return_value = {
'id': id,
'security_group_rules': rules
}
if port is None:
self.db._get_port_security_group_bindings.return_value = []
else:
self.db._get_port_security_group_bindings.return_value = [
{'port_id': port['id']}
]
self.db.get_port.return_value = port
if type == 'rule':
# Call security_groups_rule_updated with the new or changed ID.
mech_calico.security_groups_rule_updated(
mock.MagicMock(), mock.MagicMock(), [id]
)
def get_port_security_group_bindings(self, context, filters):
if filters is None:
return self.port_security_group_bindings
assert filters.keys() == ['port_id']
allowed_ids = set(filters['port_id'])
return [b for b in self.port_security_group_bindings
if b['port_id'] in allowed_ids]
def port_query(self, **kw):
if kw.get('port_id', None):
for port in self.osdb_ports:
if port['id'] == kw['port_id']:
return port['fixed_ips']
elif kw.get('fixed_port_id', None):
fips = []
for fip in floating_ports:
if fip['fixed_port_id'] == kw['fixed_port_id']:
fips.append(fip)
return fips
else:
raise Exception("port_query doesn't know how to handle kw=%r" % kw)
return None
class FixedUUID(object):
def __init__(self, uuid):
self.uuid = uuid
self.uuid4_p = mock.patch('uuid.uuid4')
def __enter__(self):
guid = mock.MagicMock()
guid.get_hex.return_value = self.uuid
guid.__str__.return_value = self.uuid
uuid4 = self.uuid4_p.start()
uuid4.return_value = guid
def __exit__(self, type, value, traceback):
self.uuid4_p.stop()
|
nilq/baby-python
|
python
|
# vim: fdm=marker
'''
author: Fabio Zanini
date: 11/12/14
content: Get trees of haplotype alignments.
'''
# Modules
import os
import argparse
from operator import itemgetter, attrgetter
import numpy as np
from matplotlib import cm
import matplotlib.pyplot as plt
from Bio import Phylo
from hivwholeseq.patients.patients import load_patients, Patient
from hivwholeseq.utils.sequence import align_muscle
from hivwholeseq.utils.tree import build_tree_fasttree
from hivwholeseq.utils.argparse import RoiAction
from hivwholeseq.store.store_tree_consensi import annotate_tree
from hivwholeseq.utils.nehercook.ancestral import ancestral_sequences
from hivwholeseq.utils.tree import tree_to_json, filter_rare_leaves
from hivwholeseq.utils.generic import write_json
# Functions
def load_alignments(filename):
'''Load alignments from website file'''
import zipfile, zlib
from Bio import AlignIO
import StringIO
alis = []
with zipfile.ZipFile(filename, 'r') as zf:
for fn in zf.namelist():
f = StringIO.StringIO(zf.read(fn))
ali = {'time': float(fn.split('_')[0]),
'ali': AlignIO.read(f, 'fasta')}
alis.append(ali)
return alis
def get_region_count_trajectories(patient, region, VERBOSE=0, countmin=5):
'''Get haplotype trajectories in a region (from the website alignments)'''
import numpy as np
from hivwholeseq.website.filenames import get_precompiled_alignments_filename
filename = get_precompiled_alignments_filename(patient.code, region)
alis = load_alignments(filename)
seqs_set = set()
for ali in alis:
seqs_set |= set([''.join(seq).replace('-', '')
for seq in ali['ali']
if int(seq.name.split('_')[1]) >= countmin])
seqs_set = list(seqs_set)
hct = np.zeros((len(seqs_set), len(alis)), int)
for it, ali in enumerate(alis):
for seq in ali['ali']:
s = ''.join(seq).replace('-', '')
count = int(seq.name.split('_')[1])
if count < countmin:
continue
iseq = seqs_set.index(s)
hct[iseq, it] = count
seqs_set = np.array(seqs_set, 'S'+str(np.max(map(len, seqs_set))))
times = np.array(map(itemgetter('time'), alis))
ind = np.array([i for i, t in enumerate(patient.times) if t in times])
# Filter out all time points without any counts
ind_keep = hct.any(axis=0)
ind = ind[ind_keep]
hct = hct[:, ind_keep]
return (hct.T, ind, seqs_set)
def annotate_tree_for_plot(tree, minfreq=0.02):
'''Add annotations for plotting'''
from matplotlib import cm
cmap = cm.jet
last_tp = max(leaf.DSI for leaf in tree.get_terminals())
def get_color(node):
return map(int, np.array(cmap(node.DSI/last_tp*0.9)[:-1]) * 255)
# Annotate leaves
for leaf in tree.get_terminals():
leaf.color = get_color(leaf)
if leaf.frequency >= minfreq:
leaf.label = ('t = '+str(int(leaf.DSI))+
', f = '+'{:1.2f}'.format(leaf.frequency))
else:
leaf.label = ''
# Color internal branches
for node in tree.get_nonterminals(order='postorder'):
node.label = ''
node.DSI = np.mean([c.DSI for c in node.clades])
node.color = get_color(node)
# Script
if __name__ == '__main__':
# Parse input args
parser = argparse.ArgumentParser(description='Get local trees',
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--patients', nargs='+',
help='Patients to analyze')
parser.add_argument('--roi', required=True, action=RoiAction,
help='Region of interest (e.g. F1 300 350 or V3 0 +oo)')
parser.add_argument('--verbose', type=int, default=0,
help='Verbosity level [0-4]')
parser.add_argument('--maxreads', type=int, default=-1,
help='Number of reads analyzed per sample')
parser.add_argument('--plot', action='store_true',
help='Plot local haplotype trajectories')
parser.add_argument('--freqmin', type=float, default=0.01,
help='Minimal frequency to keep the haplotype')
args = parser.parse_args()
pnames = args.patients
roi = args.roi
VERBOSE = args.verbose
maxreads = args.maxreads
use_plot = args.plot
freqmin = args.freqmin
patients = load_patients()
if pnames is not None:
patients = patients.loc[pnames]
for pname, patient in patients.iterrows():
patient = Patient(patient)
if VERBOSE >= 1:
print pname
if os.path.isfile(patient.get_local_tree_filename(roi[0], format='json')):
if VERBOSE >= 2:
print 'Get tree'
region = roi[0]
tree = patient.get_local_tree(region)
elif os.path.isfile(patient.get_local_tree_filename(' '.join(map(str, roi)), format='json')):
if VERBOSE >= 2:
print 'Get tree'
region = ' '.join(map(str, roi))
tree = patient.get_local_tree(region)
else:
raise IOError('Tree file not found')
if VERBOSE >= 2:
print 'Filter out too rare leaves'
filter_rare_leaves(tree, freqmin, VERBOSE=VERBOSE)
if use_plot:
if VERBOSE >= 2:
print 'Annotate tree for plotting'
annotate_tree_for_plot(tree, minfreq=0.1)
if VERBOSE >= 2:
print 'Plot'
fig, ax = plt.subplots()
ax.set_title(patient.code+', '+region)
Phylo.draw(tree, axes=ax, do_show=False, label_func=attrgetter('label'),
show_confidence=False)
ax.grid(True)
ax.set_ylim(ax.get_ylim()[0] * 1.04, -ax.get_ylim()[0] * 0.04)
plt.tight_layout()
plt.ion()
plt.show()
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from csvkit.unicsv import UnicodeCSVReader, UnicodeCSVWriter
class CSVKitReader(UnicodeCSVReader):
"""
A unicode-aware CSV reader with some additional features.
"""
pass
class CSVKitWriter(UnicodeCSVWriter):
"""
A unicode-aware CSV writer with some additional features.
"""
def __init__(self, f, encoding='utf-8', line_numbers=False, **kwargs):
self.row_count = 0
self.line_numbers = line_numbers
UnicodeCSVWriter.__init__(self, f, encoding, lineterminator='\n', **kwargs)
def _append_line_number(self, row):
if self.row_count == 0:
row.insert(0, 'line_number')
else:
row.insert(0, self.row_count)
self.row_count += 1
def writerow(self, row):
if self.line_numbers:
row = list(row)
self._append_line_number(row)
# Convert embedded Mac line endings to unix style line endings so they get quoted
row = [i.replace('\r', '\n') if isinstance(i, basestring) else i for i in row]
UnicodeCSVWriter.writerow(self, row)
def writerows(self, rows):
for row in rows:
self.writerow(row)
|
nilq/baby-python
|
python
|
#! /usr/bin/env python
from tkinter import NoDefaultRoot, Tk, ttk, filedialog
from _tkinter import getbusywaitinterval
from tkinter.constants import *
from math import sin, pi
import base64, zlib, os
################################################################################
ICON = b'eJxjYGAEQgEBBiApwZDBzMAgxsDAoAHEQCEGBQaIOAwkQDE2UOSkiUM\
Gp/rlyd740Ugzf8/uXROxAaA4VvVAqcfYAFCcoHqge4hR/+btWwgCqoez8aj//fs\
XWiAARfCrhyCg+XA2HvV/YACoHs4mRj0ywKWe1PD//p+B4QMOmqGeMAYAAY/2nw=='
################################################################################
class GUISizeTree(ttk.Frame):
@classmethod
def main(cls):
# Create the application's root.
NoDefaultRoot()
root = Tk()
# Restrict sizing and add title.
root.minsize(350, 175)
root.title('Directory Size')
# Create the application's icon.
with open('tree.ico', 'wb') as file:
file.write(zlib.decompress(base64.b64decode(ICON)))
root.iconbitmap('tree.ico')
os.remove('tree.ico')
# Configure the SizeTree object.
view = cls(root)
view.grid(row=0, column=0, sticky=NSEW)
# Setup the window for resizing.
root.grid_rowconfigure(0, weight=1)
root.grid_columnconfigure(0, weight=1)
# Enter the GUI main event loop.
root.mainloop()
def __init__(self, master=None, **kw):
super().__init__(master, **kw)
# Configure the progressbar.
self.__progress = ttk.Progressbar(self, orient=HORIZONTAL)
self.__progress.grid(row=0, column=0, columnspan=4, sticky=EW)
# Configure the tree.
self.__tree = ttk.Treeview(self, selectmode=BROWSE,
columns=('d_size', 'f_size', 'path'))
self.__tree.heading('#0', text=' Name', anchor=W)
self.__tree.heading('d_size', text=' Total Size', anchor=W)
self.__tree.heading('f_size', text=' File Size', anchor=W)
self.__tree.heading('path', text=' Path', anchor=W)
self.__tree.column('#0', minwidth=80, width=160)
self.__tree.column('d_size', minwidth=80, width=160)
self.__tree.column('f_size', minwidth=80, width=160)
self.__tree.column('path', minwidth=80, width=160)
self.__tree.grid(row=1, column=0, columnspan=3, sticky=NSEW)
# Configure the scrollbar.
self.__scroll = ttk.Scrollbar(self, orient=VERTICAL,
command=self.__tree.yview)
self.__tree.configure(yscrollcommand=self.__scroll.set)
self.__scroll.grid(row=1, column=3, sticky=NS)
# Configure the path button.
self.__label = ttk.Button(self, text='Path:', command=self.choose)
self.__label.bind('<Return>', self.choose)
self.__label.grid(row=2, column=0)
# Configure the directory dialog.
head, tail = os.getcwd(), True
while tail:
head, tail = os.path.split(head)
self.__dialog = filedialog.Directory(self, initialdir=head)
# Configure the path entry box.
self.__path = ttk.Entry(self, cursor='xterm')
self.__path.bind('<Control-Key-a>', self.select_all)
self.__path.bind('<Control-Key-/>', lambda event: 'break')
self.__path.bind('<Return>', self.search)
self.__path.grid(row=2, column=1, sticky=EW)
self.__path.focus_set()
# Configure the execution button.
self.__run = ttk.Button(self, text='Search', command=self.search)
self.__run.bind('<Return>', self.search)
self.__run.grid(row=2, column=2)
# Configure the sizegrip.
self.__grip = ttk.Sizegrip(self)
self.__grip.grid(row=2, column=3, sticky=SE)
# Configure the grid.
self.grid_rowconfigure(1, weight=1)
self.grid_columnconfigure(1, weight=1)
# Configure root item in tree.
self.__root = None
def choose(self, event=None):
# Get a directory path via a dialog.
path = self.__dialog.show()
if path:
# Fill entry box with user path.
self.__path.delete(0, END)
self.__path.insert(0, os.path.abspath(path))
def select_all(self, event):
# Select the contents of the widget.
event.widget.selection_range(0, END)
return 'break'
def search(self, event=None):
if self.__run['state'].string == NORMAL:
# Show background work progress.
self.__run['state'] = DISABLED
path = os.path.abspath(self.__path.get())
if os.path.isdir(path):
self.__progress.configure(mode='indeterminate', maximum=100)
self.__progress.start()
# Search while updating display.
if self.__root is not None:
self.__tree.delete(self.__root)
tree = SizeTree(self.update, path)
nodes = tree.total_nodes + 1
# Build user directory treeview.
self.__progress.stop()
self.__progress.configure(mode='determinate', maximum=nodes)
self.__root = self.__tree.insert('', END, text=tree.name)
self.build_tree(self.__root, tree)
# Indicate completion of search.
self.__run['state'] = NORMAL
else:
self.shake()
def shake(self):
# Check frame rate.
assert getbusywaitinterval() == 20, 'Values are hard-coded for 50 FPS.'
# Get application root.
root = self
while not isinstance(root, Tk):
root = root.master
# Schedule beginning of animation.
self.after_idle(self.__shake, root, 0)
def __shake(self, root, frame):
frame += 1
# Get the window's location and update X value.
x, y = map(int, root.geometry().split('+')[1:])
x += int(sin(pi * frame / 2.5) * sin(pi * frame / 50) * 5)
root.geometry('+{}+{}'.format(x, y))
# Schedule next frame or restore search button.
if frame < 50:
self.after(20, self.__shake, root, frame)
else:
self.__run['state'] = NORMAL
def build_tree(self, node, tree):
# Make changes to the treeview and progress bar.
text = 'Unknown!' if tree.dir_error else convert(tree.total_size)
self.__tree.set(node, 'd_size', text)
text = 'Unknown!' if tree.file_error else convert(tree.file_size)
self.__tree.set(node, 'f_size', text)
self.__tree.set(node, 'path', tree.path)
self.__progress.step()
# Update the display and extract any child node.
self.update()
for child in tree.children:
subnode = self.__tree.insert(node, END, text=child.name)
self.build_tree(subnode, child)
################################################################################
class SizeTree:
"Create a tree structure outlining a directory's size."
def __init__(self, callback, path):
callback()
self.path = path
head, tail = os.path.split(path)
self.name = tail or head
self.children = []
self.file_size = 0
self.total_size = 0
self.total_nodes = 0
self.file_error = False
self.dir_error = False
try:
dir_list = os.listdir(path)
except OSError:
self.dir_error = True
else:
for name in dir_list:
path_name = os.path.join(path, name)
if os.path.isdir(path_name):
size_tree = SizeTree(callback, path_name)
self.children.append(size_tree)
self.total_size += size_tree.total_size
self.total_nodes += size_tree.total_nodes + 1
elif os.path.isfile(path_name):
try:
self.file_size += os.path.getsize(path_name)
except OSError:
self.file_error = True
self.total_size += self.file_size
################################################################################
def convert(number):
"Convert bytes into human-readable representation."
if not number:
return '0 Bytes'
assert 0 < number < 1 << 110, 'number out of range'
ordered = reversed(tuple(format_bytes(partition_number(number, 1 << 10))))
cleaned = ', '.join(item for item in ordered if item[0] != '0')
    return cleaned
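# Worked examples (zero-valued parts are dropped from the output):
#   convert(1536)    -> '1 Kilobyte, 512 Bytes'
#   convert(1 << 20) -> '1 Megabyte'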
def partition_number(number, base):
"Continually divide number by base until zero."
div, mod = divmod(number, base)
yield mod
while div:
div, mod = divmod(div, base)
yield mod
def format_bytes(parts):
"Format partitioned bytes into human-readable strings."
for power, number in enumerate(parts):
yield '{} {}'.format(number, format_suffix(power, number))
def format_suffix(power, number):
"Compute the suffix for a certain power of bytes."
return (PREFIX[power] + 'byte').capitalize() + ('s' if number != 1 else '')
PREFIX = ' kilo mega giga tera peta exa zetta yotta bronto geop'.split(' ')
################################################################################
if __name__ == '__main__':
GUISizeTree.main()
|
nilq/baby-python
|
python
|
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import subprocess
from typing import Tuple
import torch
try:
torch.classes.load_library(
f"{os.environ['CONDA_PREFIX']}/lib/libtorchscript_pinocchio.so"
)
except OSError:
print(
"Warning: Failed to load 'libtorchscript_pinocchio.so' from CONDA_PREFIX, loading from default build directory 'polymetis/build' instead..."
)
project_root_dir = (
subprocess.run(["git", "rev-parse", "--show-toplevel"], stdout=subprocess.PIPE)
.stdout.strip()
.decode("ascii")
)
torch.classes.load_library(
os.path.join(
project_root_dir,
"polymetis/build/libtorchscript_pinocchio.so",
)
)
class RobotModelPinocchio(torch.nn.Module):
"""
A robot model able to compute kinematics & dynamics of a robot given an urdf.
Implemented as a ``torch.nn.Module`` wrapped around a C++ custom class that leverages
`Pinocchio <https://github.com/stack-of-tasks/pinocchio>`_ -
a C++ rigid body dynamics library.
"""
def __init__(self, urdf_filename: str, ee_joint_name: str):
super().__init__()
self.model = torch.classes.torchscript_pinocchio.RobotModelPinocchio(
urdf_filename, ee_joint_name
)
def get_joint_angle_limits(self) -> torch.Tensor:
return self.model.get_joint_angle_limits()
def get_joint_velocity_limits(self) -> torch.Tensor:
return self.model.get_joint_velocity_limits()
def forward_kinematics(
self, joint_positions: torch.Tensor
) -> Tuple[torch.Tensor, torch.Tensor]:
"""Computes end-effector position and orientation from a given joint position.
Args:
joint_positions: A given set of joint angles.
Returns:
Tuple[torch.Tensor, torch.Tensor]: End-effector position, end-effector orientation as quaternion
"""
pos, quat = self.model.forward_kinematics(joint_positions)
return pos.to(joint_positions), quat.to(joint_positions)
def compute_jacobian(self, joint_positions: torch.Tensor) -> torch.Tensor:
return self.model.compute_jacobian(joint_positions).to(joint_positions)
def inverse_dynamics(
self,
joint_positions: torch.Tensor,
joint_velocities: torch.Tensor,
joint_accelerations: torch.Tensor,
) -> torch.Tensor:
"""Computes the desired torques to achieve a certain joint acceleration from
given joint positions and velocities.
Returns:
torch.Tensor: desired torques
"""
return self.model.inverse_dynamics(
joint_positions, joint_velocities, joint_accelerations
).to(joint_positions)
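# Minimal usage sketch (file name, joint name, and joint count below are
# hypothetical; substitute the values for your robot):
#
#   model = RobotModelPinocchio("panda.urdf", "panda_joint8")
#   q = torch.zeros(7)
#   ee_pos, ee_quat = model.forward_kinematics(q)
#   jacobian = model.compute_jacobian(q)
#   torques = model.inverse_dynamics(q, torch.zeros(7), torch.zeros(7))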
|
nilq/baby-python
|
python
|
from unittest.mock import patch
from datetime import datetime
import httpx
import pytest
from src.zever_local.inverter import (
Inverter,
InverterData,
ZeversolarError,
ZeversolarTimeout,
)
_registry_id = "EAB241277A36"
_registry_key = "ZYXTBGERTXJLTSVS"
_hardware_version = "M11"
_software_version = "18625-797R+17829-719R"
_time = "16:22"
_date = "20/02/2022"
_serial_number = "ZS150045138C0104"
_content = f"1\n1\n{_registry_id}\n{_registry_key}\n{_hardware_version}\n{_software_version}\n{_time} {_date}\n1\n1\n{_serial_number}\n1234\n8.9\nOK\nError"
_content2 = f"1\n1\n{_registry_id}\n{_registry_key}\n{_hardware_version}\n{_software_version}\n{_time} {_date}\n1\n1\n{_serial_number}\n1234\n1.23\nOK\nError"
_byte_content = _content.encode()
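# Payload layout implied by the assertions in test_InverterData below
# (newline-separated, index -> field):
#   0-1: unknown, 2: registry id, 3: registry key, 4: hardware version,
#   5: software version, 6: "HH:MM DD/MM/YYYY", 7: communication status,
#   8: unknown, 9: serial number, 10: PAC in watts, 11: energy today in kWh,
#   12: status, 13: unknown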
async def test_async_connect():
"""Fetch the inverter info."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", "https://test.t"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
await my_inverter.async_connect()
mac_address = my_inverter.mac_address
serial_number = my_inverter.serial_number
assert mac_address == "EA-B2-41-27-7A-36"
assert serial_number == _serial_number
async def test_async_get_data():
"""Fetch inverter data."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
my_inverter_data = await my_inverter.async_get_data()
energy_today_KWh = my_inverter_data.energy_today_KWh
assert energy_today_KWh == 8.09
async def test_async_get_data_ZeversolarError():
"""Fetch inverter data throws an error."""
url = "test"
with pytest.raises(ZeversolarError):
my_inverter = Inverter(url)
await my_inverter.async_get_data()
async def test_async_get_data_ZeversolarTimeout():
"""Fetch inverter data timouts."""
url = "test"
with pytest.raises(ZeversolarTimeout):
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.side_effect = httpx.TimeoutException("Timeout")
my_inverter = Inverter(url)
await my_inverter.async_get_data()
async def test_async_connect_ZeversolarError():
"""Connect to inverter data throws an error."""
url = "test"
with pytest.raises(ZeversolarError):
my_inverter = Inverter(url)
await my_inverter.async_connect()
async def test_async_connect_ZeversolarTimeout():
"""Connect to inverter data timouts."""
url = "test"
with pytest.raises(ZeversolarTimeout):
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.side_effect = httpx.TimeoutException("Timeout")
my_inverter = Inverter(url)
await my_inverter.async_connect()
def test_InverterData():
"""Test the inverter data class."""
my_inverter_data = InverterData(_content2.split('\n'))
energy_today_KWh = my_inverter_data.energy_today_KWh
unknown0 = my_inverter_data.unknown0
unknown1 = my_inverter_data.unknown1
registry_id = my_inverter_data.registry_id
registry_key = my_inverter_data.registry_key
hardware_version = my_inverter_data.hardware_version
software_version = my_inverter_data.software_version
my_datetime = my_inverter_data.datetime
communication_status = my_inverter_data.communication_status
unknown8 = my_inverter_data.unknown8
serial_number = my_inverter_data.serial_number
pac_watt = my_inverter_data.pac_watt
energy_today_KWh = my_inverter_data.energy_today_KWh
status = my_inverter_data.status
unknown13 = my_inverter_data.unknown13
mac_address = my_inverter_data.mac_address
assert unknown0 == '1'
assert unknown1 == '1'
assert registry_id == _registry_id
assert registry_key == _registry_key
assert hardware_version == _hardware_version
assert software_version == _software_version
assert datetime(2022, 2, 20, 16, 22) == my_datetime
assert communication_status == '1'
assert unknown8 == '1'
assert serial_number == _serial_number
assert pac_watt == 1234
assert energy_today_KWh == 1.23
assert status == 'OK'
assert unknown13 == "Error"
assert mac_address == "EA-B2-41-27-7A-36"
def test_InverterData_bugfix():
"""Test the inverter data class fixing the energy bug."""
my_inverter_data = InverterData(_content.split('\n'))
energy_today_KWh = my_inverter_data.energy_today_KWh
assert energy_today_KWh == 8.09
async def test_Inverter_power_on():
"""Power on inverter."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
my_inverter = Inverter(url)
await my_inverter.async_connect()
with patch("src.zever_local.inverter.httpx.AsyncClient.post") as mock_device_info:
mock_device_info.return_value = mock_response
my_result = await my_inverter.power_on()
assert my_result
async def test_Inverter_power_off():
"""Power on inverter."""
url = "test"
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
my_inverter = Inverter(url)
await my_inverter.async_connect()
with patch("src.zever_local.inverter.httpx.AsyncClient.post") as mock_device_info:
mock_device_info.return_value = mock_response
my_result = await my_inverter.power_off()
assert my_result
async def test_Inverter_power_on_ZeversolarError():
"""Power off inverter."""
url = "test"
my_inverter = Inverter(url)
with pytest.raises(ZeversolarError):
my_inverter = Inverter(url)
await my_inverter.power_on()
async def test_Inverter_power_on_ZeversolarTimeout():
"""Power off inverter has a timeout."""
url = "test"
my_inverter = Inverter(url)
with pytest.raises(ZeversolarTimeout):
with patch("src.zever_local.inverter.httpx.AsyncClient.post") as mock_device_info:
mock_device_info.side_effect = httpx.TimeoutException("Timeout")
my_inverter = Inverter(url)
await my_inverter.power_on()
async def test_async_connect_empty_url():
    """Fetch the inverter info when the URL is empty."""
url = ""
my_inverter = Inverter(url)
mock_response = httpx.Response(
200, request=httpx.Request("Get", f"https://{url}"), content=_byte_content
)
with patch("src.zever_local.inverter.httpx.AsyncClient.get") as mock_device_info:
mock_device_info.return_value = mock_response
await my_inverter.async_connect()
mac_address = my_inverter.mac_address
serial_number = my_inverter.serial_number
assert mac_address == "EA-B2-41-27-7A-36"
assert serial_number == _serial_number
|
nilq/baby-python
|
python
|
"""
Tests for the vertex enumeration algorithm
"""
import unittest
import numpy as np
from nashpy.algorithms.vertex_enumeration import vertex_enumeration
class TestVertexEnumeration(unittest.TestCase):
"""
Tests for the vertex enumeration algorithm
"""
def test_three_by_two_vertex_enumeration(self):
A = np.array([[3, 3], [2, 5], [0, 6]])
B = np.array([[3, 2], [2, 6], [3, 1]])
expected_equilibria = sorted(
[
(np.array([1, 0, 0]), np.array([1, 0])),
(np.array([0, 1 / 3, 2 / 3]), np.array([1 / 3, 2 / 3])),
(np.array([4 / 5, 1 / 5, 0]), np.array([2 / 3, 1 / 3])),
],
key=lambda a: list(np.round(a[0], 4)),
)
equilibria = sorted(
vertex_enumeration(A, B), key=lambda a: list(np.round(a[0], 4))
)
for equilibrium, expected_equilibrium in zip(equilibria, expected_equilibria):
for strategy, expected_strategy in zip(equilibrium, expected_equilibrium):
self.assertTrue(all(np.isclose(strategy, expected_strategy)))
def test_with_negative_utilities(self):
A = np.array([[1, -1], [-1, 1]])
B = -A
expected_equilibrium = (np.array([0.5, 0.5]), np.array([0.5, 0.5]))
equilibrium = next(vertex_enumeration(A, B))
for strategy, expected_strategy in zip(equilibrium, expected_equilibrium):
assert all(np.isclose(strategy, expected_strategy)), strategy
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
__author__ = "Will Kamp"
__copyright__ = "Copyright 2013, Matrix Mariner Inc."
__license__ = "BSD"
__email__ = "will@mxmariner.com"
__status__ = "Development" # "Prototype", "Development", or "Production"
'''Wrapper program that ties together this set of programs to complete their
task of compiling charts into the MX Mariner format.
'''
import sys
import os
import inspect
current_dir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0, parent_dir)
from mxmcc import regions
from mxmcc import catalog
from mxmcc import tilebuilder
from mxmcc import tilesmerge
from mxmcc import gemf
from mxmcc import zdata
from mxmcc import verify
from mxmcc import tiles_opt
from mxmcc.checkpoint import *
from mxmcc import encryption_shim
import mbutil as mb
import re
import shutil
PROFILE_MX_R = 'MX_REGION' # (default) renders standard MX Mariner gemf + zdat
PROFILE_MB_C = 'MB_CHARTS' # renders each chart as mbtiles file
PROFILE_MB_R = 'MB_REGION' # renders entire region as mbtiles file
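# Example invocations (the region name below is hypothetical):
#   python mxmcc.py noaa_east_coast              # MX_REGION profile (default)
#   python mxmcc.py noaa_east_coast MB_REGION    # one mbtiles file for the region
#   python mxmcc.py noaa_east_coast MB_CHARTS    # one mbtiles file per chart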
def _build_catalog(checkpoint_store, profile, region):
# build catalog
point = CheckPoint.CHECKPOINT_CATALOG
if checkpoint_store.get_checkpoint(region, profile) < point:
print('building catalog for:', region)
if not regions.is_valid_region(region):
region_dir = regions.find_custom_region_path(region)
if region_dir is not None:
catalog.build_catalog_for_bsb_directory(region_dir, region)
else:
raise Exception('custom region: %s does not have a directory' % region)
else:
catalog.build_catalog_for_region(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _create_tiles(checkpoint_store, profile, region):
# create tiles
point = CheckPoint.CHECKPOINT_TILE_VERIFY
if checkpoint_store.get_checkpoint(region, profile) < point:
print('building tiles for:', region)
tilebuilder.build_tiles_for_catalog(region)
# verify
if not verify.verify_catalog(region):
raise Exception(region + ' was not verified... ' + verify.error_message)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _merge_tiles(checkpoint_store, profile, region):
# merge
point = CheckPoint.CHECKPOINT_MERGE
if checkpoint_store.get_checkpoint(region, profile) < point:
print('merging tiles for:', region)
tilesmerge.merge_catalog(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _optimize_tiles(checkpoint_store, profile, region, base_dir=config.merged_tile_dir):
# optimize
point = CheckPoint.CHECKPOINT_OPT
if checkpoint_store.get_checkpoint(region, profile) < point:
# if platform.system() == 'Windows':
# tiles_opt.set_nothreads()
tiles_opt.optimize_dir(os.path.join(base_dir, region))
# verify all optimized tiles are there
if not verify.verify_opt(region, base_dir=base_dir):
raise Exception(region + ' was not optimized fully')
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _should_encrypt(region):
encrypted_providers = {regions.provider_wavey_lines, regions.provider_ukho}
return regions.provider_for_region(region) in encrypted_providers
def _encrypt_region(checkpoint_store, profile, region):
print('encrypting tiles for region:', region)
# encryption
point = CheckPoint.CHECKPOINT_ENCRYPTED
if checkpoint_store.get_checkpoint(region, profile) < point:
if not encryption_shim.encrypt_region(region):
raise Exception('encryption failed!')
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _create_gemf(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_ARCHIVE
if checkpoint_store.get_checkpoint(region, profile) < point:
print('archiving gemf for region:', region)
should_encrypt = _should_encrypt(region)
if should_encrypt:
name = region + '.enc'
else:
name = region + '.opt'
gemf.generate_gemf(name, add_uid=should_encrypt)
#if should_encrypt:
# encryption_shim.generate_token(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _create_zdat(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_METADATA
if checkpoint_store.get_checkpoint(region, profile) < point:
print('building zdat metadata archive for:', region)
zdata.generate_zdat_for_catalog(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _fill_tiles(region):
# fill
# print('filling tile \"holes\"', region)
# filler.fill_all_in_region(region)
print(region, 'fill skipped')
def _create_region_mb_tiles(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_ARCHIVE
if checkpoint_store.get_checkpoint(region, profile) < point:
print('archiving mbtiles for region:', region)
region_dir = os.path.join(config.merged_tile_dir, region + '.opt')
mbtiles_file = os.path.join(config.compiled_dir, region + '.mbtiles')
if os.path.isfile(mbtiles_file):
os.remove(mbtiles_file)
mb.disk_to_mbtiles(region_dir, mbtiles_file, format='png', scheme='xyz')
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def __create_chart_mb_tiles(region):
region_charts_dir = os.path.join(config.unmerged_tile_dir, region + '.opt')
for chart in os.listdir(region_charts_dir):
print('archiving mbtiles for chart:', chart)
chart_dir = os.path.join(region_charts_dir, chart)
prefix = re.sub(r'\W+', '_', chart).lower()
mbtiles_file = os.path.join(config.compiled_dir, prefix + '.mbtiles')
if os.path.isfile(mbtiles_file):
os.remove(mbtiles_file)
mb.disk_to_mbtiles(chart_dir, mbtiles_file, format='png', scheme='xyz')
def _create_chart_mb_tiles(checkpoint_store, profile, region):
point = CheckPoint.CHECKPOINT_ARCHIVE
if checkpoint_store.get_checkpoint(region, profile) < point:
__create_chart_mb_tiles(region)
checkpoint_store.clear_checkpoint(region, profile, point)
else:
print('skipping checkpoint', point)
def _skip_zoom(region):
tile_path = os.path.join(config.unmerged_tile_dir, region)
for chart in os.listdir(tile_path):
zs = []
for z_dir in os.listdir(os.path.join(tile_path, chart)):
try:
z = int(z_dir)
zs.append(z)
except ValueError:
pass
zs.sort(reverse=True)
if len(zs) > 1 and (zs[0] - zs[1]) == 1:
i = 0
for z in zs:
if i % 2:
p = os.path.join(tile_path, chart, str(z))
shutil.rmtree(p)
i += 1
def compile_region(region, profile=PROFILE_MX_R, perform_clean=True):
region = region.upper()
profile = profile.upper()
checkpoint_store = CheckPointStore()
_build_catalog(checkpoint_store, profile, region)
_create_tiles(checkpoint_store, profile, region)
if 'REGION' in profile:
_merge_tiles(checkpoint_store, profile, region)
_fill_tiles(region)
_optimize_tiles(checkpoint_store, profile, region)
if 'MX_' in profile:
should_encrypt = _should_encrypt(region)
if should_encrypt:
_encrypt_region(checkpoint_store, profile, region)
_create_gemf(checkpoint_store, profile, region)
_create_zdat(checkpoint_store, profile, region)
if 'MB_' in profile:
_create_region_mb_tiles(checkpoint_store, profile, region)
elif 'CHARTS' in profile and 'MB_' in profile:
_skip_zoom(region)
_optimize_tiles(checkpoint_store, profile, region, base_dir=config.unmerged_tile_dir)
_create_chart_mb_tiles(checkpoint_store, profile, region)
print('final checkpoint', checkpoint_store.get_checkpoint(region, profile))
if perform_clean and checkpoint_store.get_checkpoint(region, profile) > CheckPoint.CHECKPOINT_ENCRYPTED:
cleanup(region, config.unmerged_tile_dir)
cleanup(region, config.merged_tile_dir)
def cleanup(region, base_dir):
for ea in os.listdir(base_dir):
if region in ea:
abs_path = os.path.join(base_dir, ea)
print('clean', abs_path)
for root, dirs, files in os.walk(abs_path, topdown=False):
for name in files:
p = os.path.join(root, name)
try:
os.remove(p)
                    except OSError:
                        print('failed to delete', p)
for name in dirs:
os.rmdir(os.path.join(root, name))
def print_usage():
print('usage:\n$python mxmcc.py <region> <optional profile>')
if __name__ == "__main__":
if config.check_dirs():
args = sys.argv
if len(args) < 2:
print_usage()
else:
rgn = args[1]
if len(args) >= 3:
prof = args[2]
else:
prof = PROFILE_MX_R
compile_region(rgn, prof)
else:
print('Your mxmcc directory structure is not ready\n' +
'Please edit the top portion of config.py, run config.py,\n' +
'and place charts in their corresponding directories.')
|
nilq/baby-python
|
python
|
import time
import numpy as np
import dolfin as df
from finmag.energies import Demag
from finmag.field import Field
from finmag.util.meshes import sphere
import matplotlib.pyplot as plt
radius = 5.0
maxhs = [0.2, 0.4, 0.5, 0.6, 0.7, 0.8, 0.85, 0.9, 0.95, 1.0]
unit_length = 1e-9
m_0 = (1, 0, 0)
Ms = 1
H_ref = np.array((- Ms / 3.0, 0, 0))
vertices = []
solvers = ["FK", "FK", "GCR", "Treecode"]
solvers_label = ["FK", "FK opt", "GCR", "Treecode"]
timings = [[], [], [], []]
errors = [[], [], [], []]
for maxh in maxhs:
mesh = sphere(r=radius, maxh=maxh, directory="meshes")
vertices.append(mesh.num_vertices())
S3 = df.VectorFunctionSpace(mesh, "Lagrange", 1)
m_function = df.Function(S3)
m_function.assign(df.Constant(m_0))
m = Field(S3, m_function)
for i, solver in enumerate(solvers):
demag = Demag(solver)
if solver == "FK":
if i == 0:
demag.parameters["phi_1_solver"] = "default"
demag.parameters["phi_1_preconditioner"] = "default"
demag.parameters["phi_2_solver"] = "default"
demag.parameters["phi_2_preconditioner"] = "default"
if i == 1:
demag.parameters["phi_1_solver"] = "cg"
demag.parameters["phi_1_preconditioner"] = "ilu"
demag.parameters["phi_2_solver"] = "cg"
demag.parameters["phi_2_preconditioner"] = "ilu"
demag.setup(m, Ms, unit_length)
start = time.time()
for j in xrange(10):
H = demag.compute_field()
elapsed = (time.time() - start) / 10.0
H = H.reshape((3, -1)).mean(axis=1)
error = abs(H[0] - H_ref[0]) / abs(H_ref[0])
timings[i].append(elapsed)
errors[i].append(error)
fig = plt.figure()
ax = fig.add_subplot(211)
ax.set_title("Runtime")
for i, solver in enumerate(solvers):
ax.plot(vertices, timings[i], label=solvers_label[i])
ax.legend(loc=2)
ax.set_xlabel("vertices")
ax.set_ylabel("time (s)")
ax = fig.add_subplot(212)
ax.set_title("Inaccuracy")
for i, solver in enumerate(solvers):
ax.plot(vertices, errors[i], label=solvers_label[i])
ax.legend(loc=2)
ax.set_xlabel("vertices")
ax.set_ylabel("relative error (%)")
fig.tight_layout()
fig.savefig("benchmark.png")
|
nilq/baby-python
|
python
|
from datetime import datetime, timezone
from hamcrest.core.string_description import StringDescription
from pytest import mark, raises
from preacher.core.datetime import DatetimeWithFormat
from preacher.core.verification.hamcrest import after, before
ORIGIN = datetime(2019, 12, 15, 12, 34, 56, tzinfo=timezone.utc)
@mark.parametrize('value', [
None,
1,
1.2,
complex(1, 2),
'str',
])
def test_datetime_matcher_invalid_creation(value):
with raises(TypeError):
before(value)
with raises(TypeError):
after(value)
@mark.parametrize('item', [None, 1])
def test_datetime_matcher_invalid_validation(item):
matcher = before(ORIGIN)
with raises(TypeError):
matcher.matches(item)
matcher = after(ORIGIN)
with raises(TypeError):
matcher.matches(item)
@mark.parametrize(('value', 'item', 'before_expected', 'after_expected'), [
(ORIGIN, '2019-12-15T12:34:55Z', True, False),
(ORIGIN, '2019-12-15T12:34:56Z', False, False),
(ORIGIN, '2019-12-15T12:34:57Z', False, True),
(DatetimeWithFormat(ORIGIN), '2019-12-15T12:34:55Z', True, False),
(DatetimeWithFormat(ORIGIN), '2019-12-15T12:34:56Z', False, False),
(DatetimeWithFormat(ORIGIN), '2019-12-15T12:34:57Z', False, True),
])
def test_datetime_matcher(value, item, before_expected, after_expected):
    matcher = before(value)
assert matcher.matches(item) == before_expected
description = StringDescription()
matcher.describe_to(description)
assert str(description).startswith('a value before <')
description = StringDescription()
matcher.describe_mismatch(item, description)
assert str(description).startswith('was <')
matcher = after(value)
assert matcher.matches(item) == after_expected
description = StringDescription()
matcher.describe_to(description)
assert str(description).startswith('a value after <')
description = StringDescription()
matcher.describe_mismatch(item, description)
assert str(description).startswith('was <')
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
from setuptools import setup, find_packages
import dbbackup
def get_requirements():
return open('requirements.txt').read().splitlines()
def get_test_requirements():
return open('requirements-tests.txt').read().splitlines()
keywords = [
'django', 'database', 'media', 'backup',
    'amazon', 's3', 'dropbox',
]
setup(
name='django-dbbackup',
version=dbbackup.__version__,
description=dbbackup.__doc__,
author=dbbackup.__author__,
author_email=dbbackup.__email__,
install_requires=get_requirements(),
tests_require=get_test_requirements(),
license='BSD',
url=dbbackup.__url__,
keywords=keywords,
packages=find_packages(),
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Web Environment',
'Environment :: Console',
'Framework :: Django',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: BSD License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Database',
'Topic :: System :: Archiving',
'Topic :: System :: Archiving :: Backup',
'Topic :: System :: Archiving :: Compression'
],
)
|
nilq/baby-python
|
python
|
from utils.rooster_utils import prediction, get_model, load_configurations, set_seed
import torch
import sys
TARGET_SR = 44100
settings = load_configurations(mode="detector")
if(settings == -1):
print("Error: Failed while loading configurations")
sys.exit()
set_seed(settings["globals"]["seed"])
melspectrogram_parameters = settings["dataset"]["params"]["melspectrogram_parameters"]
device = torch.device(settings["globals"]["device"])
model = get_model(settings["model"])
model = model.to(device)
model.train(False)
# Rename the result so it does not shadow the imported prediction() helper.
predictions = prediction(test_audio_path="test_audio/rooster_competition.wav",
                         model_config=model,
                         mel_params=melspectrogram_parameters,
                         target_sr=TARGET_SR,
                         threshold=0.4, batch_size=120, period=0.5, steps=4)
print("Total number of roosters:", len(predictions))
standings = predictions.sort_values(by='crow_length_msec', ascending=False)
print("Duration of crow from each rooster in milliseconds")
for index, rooster in predictions.iterrows():
    print(rooster["rooster_id"], ":", rooster["crow_length_msec"])
print('\n')
rank = 1
print("Ranking of roosters by crow length")
for index, rooster in standings.iterrows():
    print(rank, ":", int(rooster["rooster_id"]))
    rank += 1
print("All prediction data")
print(predictions)
|
nilq/baby-python
|
python
|
# Given an integer array nums, find the contiguous
# subarray (containing at least one number) which
# has the largest sum and return its sum.
# Example:
# Input: [-2,1,-3,4,-1,2,1,-5,4],
# Output: 6
# Explanation: [4,-1,2,1] has the largest sum = 6.
# Follow up:
# If you have figured out the O(n) solution, try coding
# another solution using the divide and conquer approach,
# which is more subtle.
# EXERCISE ==> https://leetcode.com/problems/maximum-product-subarray/
# Note: the description above is the maximum-SUM variant (LeetCode 53);
# the linked exercise asks for the maximum PRODUCT (LeetCode 152),
# which the completed solution below solves in O(n).
class Solution(object):
    def maxProduct(self, nums):
        """
        :type nums: List[int]
        :rtype: int
        """
        # Track the largest and smallest products of a subarray ending at
        # the current index; a negative factor can swap the two, so both
        # extremes must be carried along.
        best = cur_max = cur_min = nums[0]
        for num in nums[1:]:
            candidates = (num, cur_max * num, cur_min * num)
            cur_max = max(candidates)
            cur_min = min(candidates)
            best = max(best, cur_max)
        return best
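# Quick sanity checks (the standard LeetCode 152 examples):
#   Solution().maxProduct([2, 3, -2, 4])  # -> 6, from the subarray [2, 3]
#   Solution().maxProduct([-2, 0, -1])    # -> 0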
|
nilq/baby-python
|
python
|
from funcx_endpoint.endpoint.utils.config import Config
from parsl.providers import LocalProvider
config = Config(
scaling_enabled=True,
provider=LocalProvider(
init_blocks=1,
min_blocks=1,
max_blocks=1,
),
max_workers_per_node=2,
funcx_service_address='https://api.funcx.org/v1'
)
# For now, visible_to must be a list of URNs for globus auth users or groups, e.g.:
# urn:globus:auth:identity:{user_uuid}
# urn:globus:groups:id:{group_uuid}
meta = {
"name": "$name",
"description": "",
"organization": "",
"department": "",
"public": False,
"visible_to": []
}
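# e.g. to share with one user and one group (the UUIDs below are placeholders):
#   meta["visible_to"] = [
#       "urn:globus:auth:identity:00000000-0000-0000-0000-000000000000",
#       "urn:globus:groups:id:00000000-0000-0000-0000-000000000000",
#   ]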
|
nilq/baby-python
|
python
|
import abc
from numpy import ndarray
class AbstractGAN(abc.ABC):
def __init__(self, run_dir: str, outputs_dir: str, model_dir: str, generated_datasets_dir: str,
resolution: int, channels: int, epochs: int, output_save_frequency: int,
model_save_frequency: int, loss_save_frequency: int,
latent_space_save_frequency: int, dataset_generation_frequency: int, dataset_size: int,
latent_dim: int, latent_space_rows: int = 6, latent_space_columns: int = 6, outputs_rows: int = 6,
outputs_columns: int = 6):
self._run_dir = run_dir
self._outputs_dir = outputs_dir
self._model_dir = model_dir
self._generated_datasets_dir = generated_datasets_dir
self._resolution = resolution
self._channels = channels
self._epochs = epochs
self._output_save_frequency = output_save_frequency
self._model_save_frequency = model_save_frequency
self._loss_save_frequency = loss_save_frequency
self._latent_space_save_frequency = latent_space_save_frequency
self._latent_dim = latent_dim
self._dataset_generation_frequency = dataset_generation_frequency
self._dataset_size = dataset_size
self._latent_space_rows = latent_space_rows
self._latent_space_columns = latent_space_columns
self._outputs_rows = outputs_rows
self._outputs_columns = outputs_columns
self._epoch = 0
@abc.abstractmethod
def _build_models(self) -> None:
pass
@abc.abstractmethod
def train(self, dataset: ndarray, classes: ndarray) -> list:
pass
@abc.abstractmethod
def _save_models_architectures(self) -> None:
pass
@abc.abstractmethod
def _save_outputs(self) -> None:
pass
@abc.abstractmethod
def _save_latent_space(self) -> None:
pass
@abc.abstractmethod
def _save_losses(self) -> None:
pass
@abc.abstractmethod
def _save_models(self) -> None:
pass
@abc.abstractmethod
def _generate_dataset(self) -> None:
pass
|
nilq/baby-python
|
python
|
#! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os
from comnetsemu.cli import CLI, spawnXtermDocker
from comnetsemu.net import Containernet, VNFManager
from mininet.link import TCLink
from mininet.log import info, setLogLevel
from mininet.node import Controller, RemoteController
if __name__ == "__main__":
# Only used for auto-testing.
AUTOTEST_MODE = os.environ.get("COMNETSEMU_AUTOTEST_MODE", 0)
# Create template host, switch, and link
hconfig = {"inNamespace": True}
http_link_config = {"bw": 1}
video_link_config = {"bw": 10}
host_link_config = {}
setLogLevel("info")
net = Containernet(
controller=Controller,
link=TCLink,
xterms=False,
autoSetMacs=True,
autoStaticArp=True,
)
mgr = VNFManager(net)
info("*** Add controller\n")
controller = RemoteController("c1", ip="127.0.0.1", port=6633)
net.addController(controller)
info("*** Creating hosts\n")
h1 = net.addDockerHost(
"h1",
dimage="dev_test",
ip="10.0.0.1",
docker_args={"hostname": "h1"},
)
h2 = net.addDockerHost(
"h2",
dimage="dev_test",
ip="10.0.0.2",
docker_args={"hostname": "h2"},
)
h3 = net.addDockerHost(
"h3",
dimage="dev_test",
ip="10.0.0.3",
docker_args={"hostname": "h3"},
)
h4 = net.addDockerHost(
"h4",
dimage="dev_test",
ip="10.0.0.4",
docker_args={"hostname": "h4"},
)
h5 = net.addDockerHost(
"h5",
dimage="dev_test",
ip="10.0.0.5",
docker_args={"hostname": "h5"},
)
h6 = net.addDockerHost(
"h6",
dimage="dev_test",
ip="10.0.0.6",
docker_args={"hostname": "h6"},
)
h7 = net.addDockerHost(
"h7",
dimage="dev_test",
ip="10.0.0.7",
docker_args={"hostname": "h7"},
)
h8 = net.addDockerHost(
"h8",
dimage="dev_test",
ip="10.0.0.8",
docker_args={"hostname": "h8"},
)
info("*** Adding switch and links\n")
for i in range(7):
sconfig = {"dpid": "%016x" % (i + 1)}
net.addSwitch("s%d" % (i + 1), protocols="OpenFlow10", **sconfig)
# s1 = net.addSwitch("s1")
# s2 = net.addSwitch("s2")
# s3 = net.addSwitch("s3")
# s4 = net.addSwitch("s4")
# s5 = net.addSwitch("s5")
# s6 = net.addSwitch("s6")
# s7 = net.addSwitch("s7")
# Add switch links
net.addLink("s1", "s3", **http_link_config)
net.addLink("s1", "s4", **http_link_config)
net.addLink("s2", "s4", **http_link_config)
net.addLink("s2", "s5", **http_link_config)
net.addLink("s3", "s6", **http_link_config)
net.addLink("s4", "s6", **http_link_config)
net.addLink("s4", "s7", **http_link_config)
net.addLink("s5", "s7", **http_link_config)
# Add host links
net.addLink("h1", "s1", **host_link_config)
net.addLink("h2", "s1", **host_link_config)
net.addLink("h3", "s2", **host_link_config)
net.addLink("h4", "s2", **host_link_config)
net.addLink("h5", "s6", **host_link_config)
net.addLink("h6", "s6", **host_link_config)
net.addLink("h7", "s7", **host_link_config)
net.addLink("h8", "s7", **host_link_config)
info("\n*** Starting network\n")
net.start()
srv4 = mgr.addContainer(
"srv4",
"h4",
"echo_server",
"python /home/server.py",
docker_args={},
)
srv7 = mgr.addContainer(
"srv7",
"h7",
"echo_server",
"python /home/server.py",
docker_args={},
)
srv8 = mgr.addContainer(
"srv8",
"h8",
"echo_server",
"python /home/server.py",
docker_args={},
)
srv1 = mgr.addContainer("srv1", "h1", "dev_test", "bash", docker_args={})
srv2 = mgr.addContainer("srv2", "h2", "dev_test", "bash", docker_args={})
srv3 = mgr.addContainer("srv3", "h3", "dev_test", "bash", docker_args={})
srv5 = mgr.addContainer("srv5", "h5", "dev_test", "bash", docker_args={})
srv6 = mgr.addContainer("srv6", "h6", "dev_test", "bash", docker_args={})
if not AUTOTEST_MODE:
# Cannot spawn xterm for srv1 since BASH is not installed in the image:
# echo_server.
spawnXtermDocker("srv3")
CLI(net)
mgr.removeContainer("srv1")
mgr.removeContainer("srv2")
mgr.removeContainer("srv3")
mgr.removeContainer("srv4")
mgr.removeContainer("srv5")
mgr.removeContainer("srv6")
mgr.removeContainer("srv7")
mgr.removeContainer("srv8")
net.stop()
mgr.stop()
|
nilq/baby-python
|
python
|
import streamlit as st
import pandas as pd
import joblib
model = joblib.load('/content/drive/MyDrive/models/cc_foodrcmdns.pkl')
df = pd.read_csv('dataset/indianfoodMAIN.csv')
recp_name = st.selectbox("Select Recipe", df['recp_name'].values)
st.write(recp_name)
def findRcmdn(value):
data = []
index = df[df['recp_name'] == value].index[0]
distances = sorted(list(enumerate(model[index])),reverse=True,key = lambda x: x[1])
for i in distances[1:6]:
# print(df.iloc[i[0]].translatedrecipename,df.iloc[i[0]].cuisine)
print(f"{df.iloc[i[0]]['recp_name'] } , Cuisin : {df.iloc[i[0]].cuisine}")
allvalues = { "recp_name": df.iloc[i[0]]['recp_name'],
"cuisine": df.iloc[i[0]]['cuisine'],
"image-url": df.iloc[i[0]]['image-url'],
"url": df.iloc[i[0]]["url"],
}
data.append(allvalues)
return data
def custom_markdown(name, img_url, URL,csn):
mymark = f"""
<div class="w3-container w3-red">
<h1> {name} </h1>
<h5>Cuisine: {csn}</h5>
</div>
    <img src="{img_url}" alt="" style="width:50%">
    <div class="w3-container">
    <p> Recipe Instructions: <a href="{URL}" target="_blank" >Read...</a> </p>
</div>
<div class="w3-container w3-red">
</div>
"""
return mymark
if st.button("Show"):
st.text("Recipe Recommendations....")
recommendations = findRcmdn(recp_name)
for result in recommendations:
st.markdown(custom_markdown(name= result['recp_name'], img_url=result['image-url'], URL=result["url"],csn=result["cuisine"] ), True )
# st.info(result['recp_name'])
|
nilq/baby-python
|
python
|
# Copyright (c) 2017-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
# Some utility functions
def average_norm_clip(grad, clip_val):
'''
Compute the norm and clip it if necessary.
The first dimension will be batchsize.
Args:
grad(Tensor): the gradient
clip_val(float): value to clip to
'''
batchsize = grad.size(0)
avg_l2_norm = 0.0
for i in range(batchsize):
avg_l2_norm += grad[i].data.norm()
avg_l2_norm /= batchsize
if avg_l2_norm > clip_val:
# print("l2_norm: %.5f clipped to %.5f" % (avg_l2_norm, clip_val))
grad *= clip_val / avg_l2_norm
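# e.g. if the per-sample L2 norms of grad average 10.0, then
# average_norm_clip(grad, 5.0) rescales grad in place by a factor of 0.5.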
def accumulate(acc, new):
''' accumulate by the same key in a list of dicts
Args:
acc(dict): the dict to accumulate to
new(dict): new dict entry
Returns:
A new dict containing the accumulated sums of each key.
'''
ret = { k: new[k] if a is None else a + new[k] for k, a in acc.items() if k in new }
ret.update({ k : v for k, v in new.items() if not (k in acc) })
return ret
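# e.g. accumulate({'loss': 1.0}, {'loss': 0.5, 'acc': 0.9})
#      -> {'loss': 1.5, 'acc': 0.9}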
def add_err(overall_err, new_err):
''' Add ``new_err`` to ``overall_err``
Args:
overall_err(float): summed overall error
new_err(float): new error
'''
if overall_err is None:
return new_err
else:
overall_err += new_err
return overall_err
def add_stats(stats, key, value):
''' Feed ``value`` to ``stats[key]``'''
if stats:
stats[key].feed(value)
def check_terminals(has_terminal, batch):
''' Check if the environment sent a terminal signal '''
# Block backpropagation if we go pass a terminal node.
for i, terminal in enumerate(batch["terminal"]):
if terminal: has_terminal[i] = True
def check_terminals_anyT(has_terminal, batch, T):
''' Check if any of ``batch[t], t <= T`` is terminal'''
for t in range(T):
check_terminals(has_terminal, batch[t])
|
nilq/baby-python
|
python
|
import gc
import numpy as np
import pandas as pd
from datetime import datetime
from functools import partial
import tensorflow as tf
from sklearn import preprocessing
from .. import utils
from ..config import cfg
from .base_model import BaseModel
class Graph():
'''Container class for tf.Graph and associated variables.
'''
def __init__(self):
self.graph = None
class FFNModel(BaseModel):
def __init__(self, X_train, y_train, X_test, params_file=None, folds_lookup=None,
prefix=None, tf_path=None, logger=None):
BaseModel.__init__(self, X_train, y_train, X_test, params_file, folds_lookup,
prefix, logger)
self.graph = None
self.n_inputs = None
self.n_outputs = None
self.initializer = None
self.regularizer = None
self.activation = None
self.tf_path = tf_path
self.logdir = None
self.output_suffix = '_dnn_pred'
def preprocess(self, imputer_strategy='mean'):
'''Mean-fill NaN, center, and scale inputs
'''
train_idx = self.X_train.index
test_idx = self.X_test.index
cols_in = self.X_train.columns
train_len = self.X_train.shape[0]
X = np.concatenate([self.X_train.values, self.X_test.values], axis=0)
imputer = preprocessing.Imputer(strategy=imputer_strategy, axis=0, verbose=1)
self.logger.info('filling NaN...')
X[X == np.inf] = np.nan
X[X == -np.inf] = np.nan
X = imputer.fit_transform(X)
self.logger.info('standardizing inputs...')
X = preprocessing.scale(X)
self.X_train = pd.DataFrame(X[:train_len, :], index=train_idx, columns=cols_in)
self.X_test = pd.DataFrame(X[train_len:, :], index=test_idx, columns=cols_in)
del X
self.logger.info('preprocessing complete.')
def init_hparams(self):
'''interpret params.yaml file to set tf.Graph params
'''
self.n_inputs = self.X_train.shape[1]
if 'n_outputs' in self.params:
self.n_outputs = self.params['n_outputs']
else:
self.n_outputs = 1
if 'init_mode' in self.params:
init_mode = self.params['init_mode']
else:
init_mode = 'FAN_AVG'
if 'init_uniform' in self.params:
init_uniform = self.params['init_uniform']
else:
init_uniform = True
self.initializer = (
tf.contrib.layers
.variance_scaling_initializer(mode=init_mode,
uniform=init_uniform))
if 'l1_reg_weight' in self.params:
l1_reg = float(self.params['l1_reg_weight'])
else:
l1_reg = 0.0
if 'l2_reg_weight' in self.params:
l2_reg = float(self.params['l2_reg_weight'])
else:
l2_reg = 0.0
reg={'None': None,
'l1': tf.contrib.layers.l1_regularizer(scale=l1_reg),
'l2': tf.contrib.layers.l2_regularizer(scale=l2_reg),
'l1-l2': tf.contrib.layers.l1_l2_regularizer(
scale_l1=l1_reg, scale_l2=l2_reg)}
if 'regularizer' in self.params:
self.regularizer = reg[self.params['regularizer']]
else:
self.regularizer = None
act={'elu': tf.nn.elu,
'relu': tf.nn.relu,
'leaky-relu': tf.nn.leaky_relu,
'selu': tf.nn.selu,
'crelu': tf.nn.crelu,
'tanh': tf.tanh,
'sigmoid': tf.sigmoid}
        if 'activation' in self.params:
            self.activation = act[self.params['activation']]
        else:
            self.activation = tf.nn.relu
            self.logger.info('Activation not specified in params. Using ReLU.')
optimizers = {
'sgd': tf.train.GradientDescentOptimizer,
'momentum': partial(tf.train.MomentumOptimizer,
momentum=float(self.params['momentum'])),
'adam': partial(tf.train.AdamOptimizer,
beta1=float(self.params['adam_beta1']),
beta2=float(self.params['adam_beta2']),
epsilon=float(self.params['adam_epsilon'])),
'adagrad': tf.train.AdagradOptimizer,
'adadelta': tf.train.AdadeltaOptimizer,
'adamw': partial(tf.contrib.opt.AdamWOptimizer,
beta1=float(self.params['adam_beta1']),
beta2=float(self.params['adam_beta2']),
epsilon=float(self.params['adam_epsilon']),
weight_decay=float(self.params['adam_weight_decay']))}
        if 'optimizer' in self.params:
            self.optimizer = optimizers[self.params['optimizer']]
        else:
            self.optimizer = tf.train.GradientDescentOptimizer
            self.logger.info('Optimizer not specified in params. '
                             'Using GradientDescentOptimizer.')
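    # Illustrative params.yaml fragment for the lookups above (the keys are
    # the ones init_hparams reads; the values here are hypothetical):
    #   activation: elu
    #   optimizer: adam
    #   adam_beta1: 0.9
    #   adam_beta2: 0.999
    #   adam_epsilon: 1e-8
    #   regularizer: l2
    #   l2_reg_weight: 1e-4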
def _shuffle_idx(self, X):
'''Shuffle batch order when training with minibatches.
'''
idx = X.index.values
rng = np.random.RandomState(datetime.now().microsecond)
return rng.permutation(idx)
def get_batch(self, X_in, y_in, idx, batch_size, batch_no):
'''Used in train_mode='minibatch', i.e. each epoch trains against
full training set (shuffled).
'''
idx_batch = idx[batch_size * (batch_no-1):batch_size * batch_no]
X_batch = X_in.reindex(idx_batch).values
y_batch = y_in.reindex(idx_batch).values
return X_batch, y_batch
def get_sample(self, X_in, y_in, batch_size):
rng = np.random.RandomState(datetime.now().microsecond)
idx_in = X_in.index.values
idx_sample = rng.choice(idx_in, size=batch_size, replace=False)
X_batch = X_in.loc[idx_sample, :].values
y_batch = y_in.loc[idx_sample].values
return X_batch, y_batch
def init_tensorboard(self):
'''set directory and filename for tensorboard logs and checkpoint file
'''
now = datetime.now().strftime("%m%d-%H%M")
comment = self.prefix + ''
self.logdir = f'{self.tf_path}/tensorboard_logs/{now}{comment}/'
self.ckpt_file = f'{self.tf_path}/sessions/{self.prefix}_tf_model.ckpt'
def ff_layer(self, g, layer_in, layer_no):
with g.graph.as_default():
layer = tf.layers.dropout(layer_in,
rate=self.params['drop_rates'][layer_no],
training=g.train_flag,
name='drop_' + str(layer_no + 1))
layer = tf.layers.dense(layer,
self.params['layers'][layer_no],
kernel_initializer=self.initializer,
kernel_regularizer=self.regularizer,
name='dense_' + str(layer_no + 1))
if self.params['use_batch_norm'][layer_no]:
layer = tf.layers.batch_normalization(
layer, training=g.train_flag,
momentum=self.params['batch_norm_momentum'],
name='bn_' + str(layer_no + 1))
layer = self.activation(layer, name='act_' + str(layer_no + 1))
return g, layer
def _grid_cv_fold(self, fold):
params_grid, keys = self._get_cv_params_grid()
columns_list = ['fold_no', *keys]
for met in self.metrics:
columns_list.extend(['best_' + met, 'rnd_' + met])
fold_results_list = []
X_train, y_train, X_val, y_val = self._get_fold_data(fold)
for i, param_set in enumerate(params_grid):
params_str = ''
for j in range(len(param_set)):
self.params[keys[j]] = param_set[j]
params_str += f'{keys[j]}={self.params[keys[j]]} '
self.logger.info(params_str)
self.init_hparams()
self.train_eval(X_train, y_train, X_val, y_val)
self.sess.close()
best_evals = self.best_eval_multi()
for eval in best_evals:
self.logger.info(f' best val {eval[0]}: {eval[1]:.4f}, ' +
f'round {eval[2]}')
self.logger.info('')
results_row = [fold, *(str(k) for k in param_set)]
for eval in best_evals:
results_row.extend([eval[1], eval[2]])
round_results = pd.DataFrame([results_row], columns=columns_list, index=[i])
fold_results_list.append(round_results)
return pd.concat(fold_results_list, axis=0)
def grid_cv(self, val_rounds):
        '''Grid cross-validation. Permutes params/values in self.cv_grid (dict).
Args: val_rounds, integer: number of CV rounds
            (minimum: 1, maximum: number of folds)
Returns: no return; updates self.cv_results with grid CV results
'''
self.load_hparams()
keys = [*self.cv_grid.keys()]
columns = []
for met in self.metrics:
columns.extend(['best_' + met, 'rnd_' + met])
results_list = []
self.logger.info(f'starting grid CV.')
self.logger.info(f'base params: {self.params}')
for fold in range(1, val_rounds + 1):
self.logger.info(f'------------ FOLD {fold} OF {val_rounds} ------------')
fold_results = self._grid_cv_fold(fold)
results_list.append(fold_results)
self.cv_results = pd.concat(results_list, axis=0)
# display/log grid CV summary
groupby = [self.cv_results[key] for key in keys]
summ_df = self.cv_results[columns].groupby(groupby).mean()
self.logger.info(self.parse_summ_df(summ_df))
# reset/reload all params from params file
self.load_hparams()
def cv_predictions(self):
'''Generate fold-by-fold predictions. For each fold k, train on all other
folds and make predictions for k. For test set, train on the full training
dataset.
Loads all hyperparameters from the params.yaml file. Will overwrite any/all
instance.params settings.
Args: none.
Returns: pandas DataFrame with predictions for each fold in the training set,
combined with predictions for the test set.
'''
self.logger.info(f'starting predictions for CV outputs...')
self.load_hparams()
self.logger.info(f'all params restored from {self.params_file}.')
train_preds = []
for fold in range(1, self.n_folds + 1):
_, val_idx = self._get_fold_indices(fold)
X_train, y_train, X_val, y_val = self._get_fold_data(fold)
fold_outputs = self.train_eval(X_train, y_train, X_val, y_val, return_preds=True)
self.sess.close()
preds_ser = pd.Series(fold_outputs, index=val_idx)
train_preds.append(preds_ser)
self.logger.info(f'fold {fold} CV outputs complete.')
train_preds = pd.concat(train_preds)
        # rename(..., inplace=True) returns None, so return the renamed Series
        return train_preds.rename(self.prefix + self.output_suffix)
def test_predictions(self):
test_preds = self.train_eval(self.X_train, self.y_train,
self.X_test, None, return_preds=True)
self.sess.close()
test_preds = pd.Series(test_preds, index=self.X_test.index)
self.logger.info(f'test set outputs complete.')
        return test_preds.rename(self.prefix + self.output_suffix)
class DNNRegressor(FFNModel):
def __init__(self, X_train, y_train, X_test, params_file=None, folds_lookup=None,
                 prefix=None, weights=None, tf_path=None, logger=None):
FFNModel.__init__(self, X_train, y_train, X_test, params_file, folds_lookup,
prefix, tf_path, logger)
self.metrics = ['MSE', 'MAE']
self.n_outputs = 1
def init_hparams(self):
FFNModel.init_hparams(self)
self.n_outputs = 1
def best_eval_multi(self):
'''Return the minimum value for MSE and MAE
'''
return FFNModel.best_eval_multi(self, 'min')
def build_graph(self):
self.init_hparams()
self.init_tensorboard()
g = Graph()
g.graph = tf.Graph()
with g.graph.as_default():
g.X = tf.placeholder(tf.float32, shape=(None,
self.n_inputs),
name='X')
g.y = tf.placeholder(tf.float32, shape=(None), name='y')
g.train_flag = tf.placeholder_with_default(False, shape=(), name='training')
g.stack = [g.X]
for layer_no, layer in enumerate(self.params['layers']):
g, layer_out = self.ff_layer(g, g.stack[-1], layer_no)
g.stack.append(layer_out)
g.drop = tf.layers.dropout(g.stack[-1],
rate=self.params['drop_rates'][-1],
training=g.train_flag,
name='drop_before_logits')
g.dnn_outputs = tf.layers.dense(g.drop, 1, activation=None)
with tf.name_scope('loss'):
g.MAE = tf.reduce_mean(tf.abs(g.dnn_outputs - g.y))
g.MSE = tf.reduce_mean(tf.square(g.dnn_outputs - g.y))
g.exp_error = tf.reduce_mean(tf.subtract(tf.exp(tf.abs(g.dnn_outputs - g.y)), 1))
g.reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
with tf.name_scope('train'):
g.optimizer = self.optimizer(learning_rate=float(self.params['eta']))
objective = self.params['objective']
if objective == 'MAE':
g.loss=tf.add_n([g.MAE] + g.reg_losses, name='combined_loss')
elif objective == 'MSE':
g.loss=tf.add_n([g.MSE] + g.reg_losses, name='combined_loss')
elif objective == 'exp_error':
g.loss=tf.add_n([g.exp_error] + g.reg_losses, name='combined_loss')
g.training_op = g.optimizer.minimize(g.loss)
g.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
if self.params['regularizer'] != 'None':
with tf.name_scope('reg_losses'):
g.train_reg_loss = tf.summary.scalar('train', tf.add_n(g.reg_losses))
g.val_reg_loss = tf.summary.scalar('val', tf.add_n(g.reg_losses))
with tf.name_scope('MSE'):
g.train_mse = tf.summary.scalar('train', g.MSE)
g.val_mse = tf.summary.scalar('val', g.MSE)
with tf.name_scope('MAE'):
g.train_mae = tf.summary.scalar('train', g.MAE)
g.val_mae = tf.summary.scalar('val', g.MAE)
g.file_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())
g.saver = tf.train.Saver()
return g
def train_eval(self, X_train, y_train, X_val, y_val, return_preds=False, save_ckpt=False):
g = self.build_graph()
self.evals_out = {'round': [],
'train': {'MSE': [], 'MAE': []},
'val': {'MSE': [], 'MAE': []}}
train_batch_size = self.params['train_batch_size']
val_batch_size = self.params['val_batch_size']
n_val_batches = self.params['n_val_batches']
if not return_preds:
# add header for logger
self.logger.info(f' RND TRAIN | VAL')
self.logger.info(f' MSE MAE | MSE MAE')
self.sess = tf.InteractiveSession(graph=g.graph,
config=tf.ConfigProto(allow_soft_placement=True))
self.sess.run(tf.global_variables_initializer())
if self.params['train_mode'] == 'minibatch':
n_batches = (X_train.shape[0] // train_batch_size) + 1
train_batch_size = X_train.shape[0] // n_batches
self.logger.info(f'CV batch size scaled: {train_batch_size} n_batches {n_batches}')
self.params['tboard_evals_step'] = 1
self.params['log_evals_step'] = 1
self.logger.info(f'evals set to every epoch')
for epoch in range(self.params['n_epochs']):
if self.params['train_mode'] == 'minibatch':
idx = self._shuffle_idx(X_train)
for batch in range(1, n_batches+1):
X_train_batch, y_train_batch = self.get_batch(X_train,
y_train, idx, train_batch_size, batch)
train_op_dict = {g.X: X_train_batch, g.y: y_train_batch, g.train_flag:True}
self.sess.run([g.training_op, g.extra_update_ops], feed_dict=train_op_dict)
elif self.params['train_mode'] == 'sgd':
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
train_op_dict = {g.X: X_train_batch,
g.y: y_train_batch,
g.train_flag:True}
self.sess.run([g.training_op, g.extra_update_ops], feed_dict=train_op_dict)
if ((epoch + 1) % self.params['tboard_evals_step'] == 0
and not return_preds):
train_mse_summ = g.train_mse.eval(
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
train_mae_summ = g.train_mae.eval(
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
g.file_writer.add_summary(train_mse_summ, epoch+1)
g.file_writer.add_summary(train_mae_summ, epoch+1)
X_val_batch, y_val_batch = self.get_sample(X_val, y_val, val_batch_size)
val_mse_summ = g.val_mse.eval(
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
val_mae_summ = g.val_mae.eval(
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
g.file_writer.add_summary(val_mse_summ, epoch+1)
g.file_writer.add_summary(val_mae_summ, epoch+1)
if self.params['regularizer'] in ['l1', 'l2', 'l1-l2']:
train_reg_loss_summ = g.train_reg_loss.eval(
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
g.file_writer.add_summary(train_reg_loss_summ, epoch)
val_reg_loss_summ = g.val_reg_loss.eval(
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
g.file_writer.add_summary(val_reg_loss_summ, epoch)
if ((epoch + 1) % self.params['log_evals_step'] == 0
and not return_preds):
round_evals = {'train': {'MSE': [], 'MAE': []},
'val': {'MSE': [], 'MAE': []}}
for i in range(n_val_batches):
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
round_evals['train']['MSE'].append(
g.MSE.eval(feed_dict={g.X: X_train_batch,
g.y: y_train_batch,
g.train_flag:False}))
round_evals['train']['MAE'].append(
g.MAE.eval(feed_dict={g.X: X_train_batch,
g.y: y_train_batch,
g.train_flag:False}))
X_val_batch, y_val_batch = self.get_sample(
X_val, y_val, val_batch_size)
round_evals['val']['MSE'].append(
g.MSE.eval(feed_dict={g.X: X_val_batch,
g.y: y_val_batch,
g.train_flag:False}))
round_evals['val']['MAE'].append(
g.MAE.eval(feed_dict={g.X: X_val_batch,
g.y: y_val_batch,
g.train_flag:False}))
train_mse_ = sum(round_evals['train']['MSE']) / n_val_batches
train_mae_ = sum(round_evals['train']['MAE']) / n_val_batches
eval_mse_ = sum(round_evals['val']['MSE']) / n_val_batches
eval_mae_ = sum(round_evals['val']['MAE']) / n_val_batches
# add round results for logger
self.logger.info(f' {str(epoch + 1):>4} {train_mse_:>10.4f} ' +
f'{train_mae_:>10.4f} | ' +
f'{eval_mse_:>10.4f} {eval_mae_:>10.4f}')
self.evals_out['round'].append(epoch + 1)
self.evals_out['train']['MSE'].append(train_mse_)
self.evals_out['train']['MAE'].append(train_mae_)
self.evals_out['val']['MSE'].append(eval_mse_)
self.evals_out['val']['MAE'].append(eval_mae_)
if save_ckpt:
save_path = g.saver.save(self.sess, self.ckpt_file)
g.file_writer.close()
self.logger.info(f'checkpoint saved as \'{self.ckpt_file}\'.')
if return_preds:
chunk_size = int(self.params['predict_chunk_size'])
n_chunks = X_val.shape[0] // chunk_size + 1
fold_preds = []
for i in range(n_chunks):
feed_dict={g.train_flag:False,
g.X: X_val.iloc[(i*chunk_size):((i+1)*chunk_size), :].values}
preds_chunk = g.dnn_outputs.eval(feed_dict=feed_dict)
fold_preds.extend(preds_chunk.ravel())
return fold_preds
class FFNClassifier(FFNModel):
def __init__(self, X_train, y_train, X_test, params_file=None, folds_lookup=None,
prefix=None, weights=None, tf_path=None, logger=None):
FFNModel.__init__(self, X_train, y_train, X_test, params_file, folds_lookup,
prefix, tf_path, logger)
self.y_train = self.y_train.astype(int)
self.metrics = ['AUC', 'acc', 'precision', 'recall']
def init_hparams(self):
FFNModel.init_hparams(self)
if 'pos_weight' not in self.params:
self.params['pos_weight'] = 1.0
def best_eval_multi(self):
'''Return the maximum round result for all metrics
(AUC, accuracy, precision, and recall)
'''
return FFNModel.best_eval_multi(self, 'max')
def build_graph(self):
self.init_hparams()
self.init_tensorboard()
g = Graph()
g.graph = tf.Graph()
with g.graph.as_default():
g.X = tf.placeholder(tf.float32, shape=(None, int(self.n_inputs)), name='X')
if self.n_outputs == 1:
g.y = tf.placeholder(tf.int32,
shape=(None),
name='y')
g.y_2d = tf.one_hot(g.y, 2, axis=-1)
else:
g.y = tf.placeholder(tf.int32,
shape=(None, int(self.n_outputs)),
name='y')
g.y_2d = tf.identity(g.y, name='y_passthru')
g.train_flag = tf.placeholder_with_default(False, shape=(), name='training')
g.stack = [g.X]
for layer_no, layer in enumerate(self.params['layers']):
g, layer_out = self.ff_layer(g, g.stack[-1], layer_no)
g.stack.append(layer_out)
g.drop_final = tf.layers.dropout(g.stack[-1],
rate=self.params['drop_rates'][-1],
training=g.train_flag,
name='drop_before_logits')
if self.n_outputs == 1:
g.logits = tf.layers.dense(g.drop_final, 2, name='logits')
else:
g.logits = tf.layers.dense(g.drop_final, int(self.n_outputs), name='logits')
with tf.name_scope('predictions'):
g.soft_preds_sparse = tf.nn.softmax(g.logits, name='soft_preds_sparse')
# TODO: adjust for multi-class
g.soft_preds_scalar = g.soft_preds_sparse[:, 1]
#g.soft_preds = tf.slice(g.soft_preds, [0, 1], [-1, 1])
g.hard_preds_scalar = tf.argmax(g.logits, axis=-1, name='hard_preds_scalar')
if self.n_outputs == 1:
g.hard_preds_sparse = tf.one_hot(g.hard_preds_scalar, 2,
name='hard_preds_sparse')
else:
g.hard_preds_sparse = tf.one_hot(g.hard_preds_scalar,
self.n_outputs,
name='hard_preds_sparse')
with tf.name_scope('loss'):
g.xentropy = tf.nn.weighted_cross_entropy_with_logits(g.y_2d,
logits=g.logits, pos_weight=self.params['pos_weight'])
g.xentropy_mean=tf.reduce_mean(g.xentropy, name='xentropy')
g.reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
g.combined_loss=tf.add_n([g.xentropy_mean] + g.reg_losses, name='combined_loss')
# BINARY classification: tf.metrics 'accuracy', 'auc', 'precision', and 'recall'
if self.n_outputs == 1:
with tf.name_scope('binary_metrics'):
g.train_acc_val, g.train_acc_op = tf.metrics.accuracy(
labels=g.y, predictions=g.hard_preds_scalar)
g.train_auc_val, g.train_auc_op = tf.metrics.auc(
labels=g.y, predictions=g.soft_preds_scalar)
g.train_precision_val, g.train_precision_op = tf.metrics.precision(
labels=g.y, predictions=g.hard_preds_scalar)
g.train_recall_val, g.train_recall_op = tf.metrics.recall(
labels=g.y, predictions=g.hard_preds_scalar)
g.val_acc_val, g.val_acc_op = tf.metrics.accuracy(
labels=g.y, predictions=g.hard_preds_scalar)
g.val_auc_val, g.val_auc_op = tf.metrics.auc(
labels=g.y, predictions=g.soft_preds_scalar)
g.val_precision_val, g.val_precision_op = tf.metrics.precision(
labels=g.y, predictions=g.hard_preds_scalar)
g.val_recall_val, g.val_recall_op = tf.metrics.recall(
labels=g.y, predictions=g.hard_preds_scalar)
# EXPERIMENTAL: tf.metrics 'mean_per_class_accuracy', 'precision_at_k',
# and 'recall_at_k' for multi- classification
k = 1 # top-1 scores
if self.n_outputs > 2:
with tf.name_scope('multiclass_metrics'):
g.train_acc_val, g.train_acc_op = tf.metrics.mean_per_class_accuracy(
g.y, g.hard_preds_scalar, num_classes=self.n_outputs)
g.train_precision_val, g.train_precision_op = tf.metrics.precision_at_k(
g.y_2d, g.hard_preds_sparse, k)
g.train_recall_val, g.train_recall_op = tf.metrics.recall_at_k(
g.y_2d, g.hard_preds_sparse, k)
g.val_acc_val, g.val_acc_op = tf.metrics.mean_per_class_accuracy(
g.y, g.hard_preds_scalar, num_classes=self.n_outputs)
g.val_precision_val, g.val_precision_op = tf.metrics.precision_at_k(
g.y_2d, g.hard_preds_sparse, k)
g.val_recall_val, g.val_recall_op = tf.metrics.recall_at_k(
g.y_2d, g.hard_preds_sparse, k)
with tf.name_scope('train'):
g.optimizer = self.optimizer(learning_rate=float(self.params['eta']))
g.training_op = g.optimizer.minimize(g.combined_loss)
g.extra_update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.name_scope('xentropy'):
g.train_xentropy = tf.summary.scalar('train', g.xentropy_mean)
g.val_xentropy = tf.summary.scalar('val', g.xentropy_mean)
if self.params['regularizer'] != 'None':
with tf.name_scope('reg_losses'):
g.train_reg_loss = tf.summary.scalar('train', tf.add_n(g.reg_losses))
g.val_reg_loss = tf.summary.scalar('val', tf.add_n(g.reg_losses))
with tf.name_scope('ROC_AUC'):
g.train_auc = tf.summary.scalar('train', g.train_auc_val)
g.val_auc = tf.summary.scalar('val', g.val_auc_val)
with tf.name_scope('accuracy'):
g.train_acc = tf.summary.scalar('train', g.train_acc_val)
g.val_acc = tf.summary.scalar('val', g.val_acc_val)
g.file_writer = tf.summary.FileWriter(self.logdir, tf.get_default_graph())
g.saver = tf.train.Saver()
return g
def train_eval(self, X_train, y_train, X_val, y_val, return_preds=False,
save_ckpt=False):
g = self.build_graph()
self.evals_out = {'round': [],
'train': {'AUC': [], 'acc': [], 'precision': [], 'recall': []},
'val': {'AUC': [], 'acc': [], 'precision': [], 'recall': []}}
train_batch_size = self.params['train_batch_size']
val_batch_size = self.params['val_batch_size']
n_val_batches = self.params['n_val_batches']
if not return_preds:
self.logger.info(f' RND TRAIN | VAL')
self.logger.info(f' acc auc prec recall | acc auc prec recall')
self.sess = tf.InteractiveSession(graph=g.graph,
config=tf.ConfigProto(allow_soft_placement=True))
self.sess.run(tf.global_variables_initializer())
if self.params['train_mode'] == 'minibatch':
n_train_batches = (X_train.shape[0] // train_batch_size) + 1
train_batch_size = X_train.shape[0] // n_train_batches
self.logger.info(f'CV batch size scaled: {train_batch_size} n_train_batches {n_train_batches}')
self.params['tboard_evals_step'] = 1
self.params['log_evals_step'] = 1
self.logger.info(f'evals set to every epoch')
for epoch in range(self.params['n_epochs']):
if self.params['train_mode'] == 'minibatch':
idx = self._shuffle_idx(X_train)
for batch in range(1, n_train_batches+1):
X_train_batch, y_train_batch = self.get_batch(X_train,
y_train, idx, train_batch_size, batch)
train_op_dict = {g.X: X_train_batch, g.y: y_train_batch, g.train_flag:True}
self.sess.run([g.training_op, g.extra_update_ops],
feed_dict=train_op_dict)
elif self.params['train_mode'] == 'sgd':
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
train_op_dict = {g.X: X_train_batch,
g.y: y_train_batch,
g.train_flag:True}
self.sess.run([g.training_op, g.extra_update_ops],
feed_dict=train_op_dict)
# Tensorboard evals
if ((epoch + 1) % self.params['tboard_evals_step'] == 0
and not return_preds):
self.sess.run(tf.local_variables_initializer())
train_eval_dict = {g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False}
self.sess.run(g.train_acc_op, feed_dict=train_eval_dict)
train_xent_summ, train_acc_summ =\
self.sess.run([g.train_xentropy, g.train_acc],
feed_dict=train_eval_dict)
g.file_writer.add_summary(train_xent_summ, epoch+1)
g.file_writer.add_summary(train_acc_summ, epoch+1)
X_val_batch, y_val_batch = self.get_sample(X_val, y_val, val_batch_size)
val_eval_dict = {g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False}
self.sess.run(g.val_acc_op, feed_dict=val_eval_dict)
val_xent_summ, val_acc_summ =\
self.sess.run([g.val_xentropy, g.val_acc],
feed_dict=val_eval_dict)
g.file_writer.add_summary(val_xent_summ, epoch+1)
g.file_writer.add_summary(val_acc_summ, epoch+1)
# eval AUC for binary classification only
if self.n_outputs == 1:
self.sess.run(g.train_auc_op, feed_dict=train_eval_dict)
train_auc_summ = self.sess.run(g.train_auc, feed_dict=train_eval_dict)
g.file_writer.add_summary(train_auc_summ, epoch+1)
self.sess.run(g.val_auc_op, feed_dict=val_eval_dict)
val_auc_summ = self.sess.run(g.val_auc, feed_dict=val_eval_dict)
g.file_writer.add_summary(val_auc_summ, epoch+1)
if self.params['regularizer'] in ['l1', 'l2', 'l1-l2']:
train_reg_loss_summ = g.train_reg_loss.eval(
feed_dict=train_eval_dict)
g.file_writer.add_summary(train_reg_loss_summ, epoch)
val_reg_loss_summ = g.val_reg_loss.eval(
feed_dict=val_eval_dict)
g.file_writer.add_summary(val_reg_loss_summ, epoch)
# logger evals for BINARY classification
if ((epoch + 1) % self.params['log_evals_step'] == 0
and self.n_outputs == 1 and not return_preds):
self.sess.run(tf.local_variables_initializer())
for i in range(n_val_batches):
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
self.sess.run([g.train_acc_op, g.train_auc_op, g.train_precision_op, g.train_recall_op],
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
X_val_batch, y_val_batch = self.get_sample(
X_val, y_val, val_batch_size)
self.sess.run([g.val_acc_op, g.val_auc_op, g.val_precision_op, g.val_recall_op],
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
train_acc_, train_auc_, train_precision_, train_recall_ =\
self.sess.run([g.train_acc_val, g.train_auc_val, g.train_precision_val, g.train_recall_val])
val_acc_, val_auc_, val_precision_, val_recall_ =\
self.sess.run([g.val_acc_val, g.val_auc_val, g.val_precision_val, g.val_recall_val])
# log evals
self.logger.info(f' {str(epoch + 1):>4} {train_acc_:.4f} {train_auc_:.4f} ' +
f'{train_precision_:.4f} {train_recall_:.4f} | ' +
f'{val_acc_:.4f} {val_auc_:.4f} ' +
f'{val_precision_:.4f} {val_recall_:.4f}')
# record evals to self.evals_out (for plot_results)
self.evals_out['round'].append(epoch + 1)
self.evals_out['train']['acc'].append(train_acc_)
self.evals_out['train']['AUC'].append(train_auc_)
self.evals_out['train']['precision'].append(train_precision_)
self.evals_out['train']['recall'].append(train_recall_)
self.evals_out['val']['acc'].append(val_acc_)
self.evals_out['val']['AUC'].append(val_auc_)
self.evals_out['val']['precision'].append(val_precision_)
self.evals_out['val']['recall'].append(val_recall_)
# logger evals for MULTICLASS classification
if ((epoch + 1) % self.params['log_evals_step'] == 0
and self.n_outputs > 2 and not return_preds):
self.sess.run(tf.local_variables_initializer())
for i in range(n_val_batches):
X_train_batch, y_train_batch = self.get_sample(
X_train, y_train, train_batch_size)
self.sess.run([g.train_acc_op, g.train_precision_op, g.train_recall_op],
feed_dict={g.X: X_train_batch, g.y: y_train_batch, g.train_flag:False})
X_val_batch, y_val_batch = self.get_sample(
X_val, y_val, val_batch_size)
self.sess.run([g.val_acc_op, g.val_precision_op, g.val_recall_op],
feed_dict={g.X: X_val_batch, g.y: y_val_batch, g.train_flag:False})
train_acc_, train_precision_, train_recall_ =\
self.sess.run([g.train_acc_val, g.train_precision_val, g.train_recall_val])
val_acc_, val_precision_, val_recall_ =\
self.sess.run([g.val_acc_val, g.val_precision_val, g.val_recall_val])
# log evals
self.logger.info(f' {str(epoch + 1):>4} {train_acc_:.4f} ' +
f'{train_precision_:.4f} {train_recall_:.4f} | ' +
f'{val_acc_:.4f} ' +
f'{val_precision_:.4f} {val_recall_:.4f}')
# record evals for plot_results()
self.evals_out['round'].append(epoch + 1)
self.evals_out['train']['acc'].append(train_acc_)
self.evals_out['train']['precision'].append(train_precision_)
self.evals_out['train']['recall'].append(train_recall_)
self.evals_out['val']['acc'].append(val_acc_)
self.evals_out['val']['precision'].append(val_precision_)
self.evals_out['val']['recall'].append(val_recall_)
if save_ckpt:
save_path = g.saver.save(self.sess, self.ckpt_file)
g.file_writer.close()
self.logger.info(f'checkpoint saved as \'{self.ckpt_file}\'.')
#------- TODO: ADD SUPPORT FOR MULTI-CLASS -------
if return_preds and self.n_outputs == 1:
chunk_size = int(self.params['predict_chunk_size'])
n_chunks = X_val.shape[0] // chunk_size + 1
fold_preds = []
for i in range(n_chunks):
feed_dict={g.train_flag:False,
g.X: X_val.iloc[(i*chunk_size):((i+1)*chunk_size), :].values}
preds_chunk = g.soft_preds_scalar.eval(feed_dict=feed_dict)
fold_preds.extend(preds_chunk.ravel())
return fold_preds
|
nilq/baby-python
|
python
|
import cosypose
import os
import yaml
from joblib import Memory
from pathlib import Path
import getpass
import socket
import torch.multiprocessing
torch.multiprocessing.set_sharing_strategy('file_system')
hostname = socket.gethostname()
username = getpass.getuser()
PROJECT_ROOT = Path(cosypose.__file__).parent.parent
PROJECT_DIR = PROJECT_ROOT
DATA_DIR = PROJECT_DIR / 'data'
LOCAL_DATA_DIR = PROJECT_DIR / 'local_data'
TEST_DATA_DIR = LOCAL_DATA_DIR
DASK_LOGS_DIR = LOCAL_DATA_DIR / 'dasklogs'
SYNT_DS_DIR = LOCAL_DATA_DIR / 'synt_datasets'
BOP_DS_DIR = LOCAL_DATA_DIR / 'bop_datasets'
BOP_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_cosypose'
BOP_CHALLENGE_TOOLKIT_DIR = PROJECT_DIR / 'deps' / 'bop_toolkit_challenge'
EXP_DIR = LOCAL_DATA_DIR / 'experiments'
RESULTS_DIR = LOCAL_DATA_DIR / 'results'
DEBUG_DATA_DIR = LOCAL_DATA_DIR / 'debug_data'
DEPS_DIR = PROJECT_DIR / 'deps'
CACHE_DIR = LOCAL_DATA_DIR / 'joblib_cache'
assert LOCAL_DATA_DIR.exists()
CACHE_DIR.mkdir(exist_ok=True)
TEST_DATA_DIR.mkdir(exist_ok=True)
DASK_LOGS_DIR.mkdir(exist_ok=True)
SYNT_DS_DIR.mkdir(exist_ok=True)
RESULTS_DIR.mkdir(exist_ok=True)
DEBUG_DATA_DIR.mkdir(exist_ok=True)
ASSET_DIR = DATA_DIR / 'assets'
MEMORY = Memory(CACHE_DIR, verbose=2)
CONDA_PREFIX = os.environ['CONDA_PREFIX']
if 'CONDA_PREFIX_1' in os.environ:
CONDA_BASE_DIR = os.environ['CONDA_PREFIX_1']
CONDA_ENV = os.environ['CONDA_DEFAULT_ENV']
else:
CONDA_BASE_DIR = os.environ['CONDA_PREFIX']
CONDA_ENV = 'base'
cfg = yaml.load((PROJECT_DIR / 'config_yann.yaml').read_text(), Loader=yaml.FullLoader)
SLURM_GPU_QUEUE = cfg['slurm_gpu_queue']
SLURM_QOS = cfg['slurm_qos']
DASK_NETWORK_INTERFACE = cfg['dask_network_interface']
# Kwai path
KWAI_PATH = "/data2/cxt/kwai/IMG_3486"
|
nilq/baby-python
|
python
|
from django.contrib.auth.models import User
from django.core import mail
from django.test import TestCase
from hc.api.models import Check
from hc.test import BaseTestCase
class LogoutTestCase(BaseTestCase):
def test_it_logs_out_users(self):
form = {'email': 'alice@example.org', 'password': 'password'}
# make sure a user is logged in successfully
response = self.client.post("/accounts/login/", form)
self.assertEqual(response.status_code, 302)
# logout user and test it redirects to index
r = self.client.get("/accounts/logout", follow=True)
self.assertEqual(r.status_code, 200)
self.assertTemplateUsed(r, 'front/welcome.html')
|
nilq/baby-python
|
python
|
"""
Human-explainable AI.
This is the class and function reference of FACET for advanced model selection,
inspection, and simulation.
"""
__version__ = "1.2.0"
__logo__ = (
r"""
_ ____ _ _ ___ __ ___ _____
_-´ _- / ___\ / \ /\ /\ /\ /\ / \ | /\ / ` | |
| ,-´ , | | | __ / _ \ / \/ \ / \/ \ / _ \ |___ / \ | |__ |
| | | | | | |_] |/ ___ \/ /\ /\ \ /\ /\ \/ ___ \ | /----\| | |
`'-| ' \____/_/ \_\/ \/ \_\ \/ \_\/ \_\ | / \\__, |___ |
"""
)[1:]
|
nilq/baby-python
|
python
|
"""LiveSimulator: This class reads in various Bro IDS logs. The class utilizes
the BroLogReader and simply loops over the static bro log
file, replaying rows and changing any time stamps
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""
from __future__ import print_function
import os
import time
import datetime
import itertools
# Third party
import numpy as np
# Local Imports
from brothon import bro_log_reader
from brothon.utils import file_utils
class LiveSimulator(object):
"""LiveSimulator: This class reads in various Bro IDS logs. The class utilizes the
BroLogReader and simply loops over the static bro log file
replaying rows at the specified EPS and changing timestamps to 'now()'
"""
def __init__(self, filepath, eps=10, max_rows=None):
"""Initialization for the LiveSimulator Class
Args:
eps (int): Events Per Second that the simulator will emit events (default = 10)
max_rows (int): The maximum number of rows to generate (default = None (go forever))
"""
# Compute EPS timer
# Logic:
# - Normal distribution centered around 1.0/eps
# - Make sure never less than 0
# - Precompute 1000 deltas and then just cycle around
self.eps_timer = itertools.cycle([max(0, delta) for delta in np.random.normal(1.0/float(eps), .5/float(eps), size=1000)])
# Initialize the Bro log reader
self.log_reader = bro_log_reader.BroLogReader(filepath, tail=False)
# Store max_rows
self.max_rows = max_rows
def readrows(self):
"""Using the BroLogReader this method yields each row of the log file
replacing timestamps, looping and emitting rows based on EPS rate
"""
# Loop forever or until max_rows is reached
num_rows = 0
while True:
# Yield the rows from the internal reader
for row in self.log_reader.readrows():
yield self.replace_timestamp(row)
# Sleep and count rows
time.sleep(next(self.eps_timer))
num_rows += 1
# Check for max_rows
if self.max_rows and (num_rows >= self.max_rows):
return
@staticmethod
def replace_timestamp(row):
"""Replace the timestamp with now()"""
if 'ts' in row:
row['ts'] = datetime.datetime.utcnow()
return row
def test():
"""Test for LiveSimulator Python Class"""
# Grab a test file
data_path = file_utils.relative_dir(__file__, '../data')
test_path = os.path.join(data_path, 'conn.log')
print('Opening Data File: {:s}'.format(test_path))
# Create a LiveSimulator reader
reader = LiveSimulator(test_path, max_rows=10)
for line in reader.readrows():
print(line)
print('Read with max_rows Test successful!')
if __name__ == '__main__':
# Run the test for easy testing/debugging
test()
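# A standalone sketch of the EPS jitter described above: delays are drawn from
# a normal distribution centred on 1/eps, clamped at zero, and cycled so each
# emitted row only costs a next() call. Reuses the numpy/itertools imports at
# the top of this module; the eps value below is illustrative.
if __name__ == '__main__':
    demo_eps = 10
    jitter = itertools.cycle([max(0, delta) for delta in
                              np.random.normal(1.0/demo_eps, .5/demo_eps, size=1000)])
    sample = [next(jitter) for _ in range(8)]
    print('sample delays (seconds): {!r}'.format(['{:.3f}'.format(d) for d in sample]))
    print('target mean delay 1/eps = {:.3f}'.format(1.0/demo_eps))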
|
nilq/baby-python
|
python
|
from unittest import TestCase
from dynamic_fixtures.fixtures.basefixture import BaseFixture
class BaseFixtureTestCase(TestCase):
def test_load_not_implemented(self):
"""
Case: load is not implemented
Expected: Error get raised
"""
fixture = BaseFixture("Name", "Module")
with self.assertRaises(NotImplementedError):
fixture.load()
|
nilq/baby-python
|
python
|
from empregado import Empregado
class Operario(Empregado):
def __init__(self, nome, endereco, telefone, codigo_setor, salario_base, imposto, valor_producao, comissao):
super().__init__(nome, endereco, telefone, codigo_setor, salario_base, imposto)
self._valor_producao = valor_producao
self._comissao = comissao
@property
def valor_producao(self):
return self._valor_producao
@valor_producao.setter
def valor_producao(self, valor):
if valor >= 0:
self._valor_producao = valor
@property
def comissao(self):
return self._comissao
@comissao.setter
def comissao(self, comissao):
if 0 <= comissao <= 100:
self._comissao = comissao
def calcular_salario(self):
return super().calcular_salario() + (self.comissao / 100 * self.valor_producao)
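# A brief usage sketch: the constructor values below (nome, endereco, telefone,
# codigo_setor, salario_base, imposto, valor_producao, comissao) are
# illustrative only. A 5% commission over a production value of 18000.0 adds
# 900.0 on top of whatever Empregado.calcular_salario() returns.
if __name__ == "__main__":
    operario = Operario("Ana", "Rua A, 123", "9999-0000", 7, 2500.0, 10.0, 18000.0, 5.0)
    print(operario.calcular_salario())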
|
nilq/baby-python
|
python
|
import inspect
import typing
from chia import instrumentation
class Factory:
name_to_class_mapping: typing.Optional[typing.Dict] = None
default_section: typing.Optional[str] = None
i_know_that_var_args_are_not_supported = False
@classmethod
def create(cls, config: dict, observers=(), **kwargs):
if not hasattr(config, "keys"):
config = {"name": config}
unused_config_keys = set(config.keys())
temp_observable = instrumentation.NamedObservable(cls.__name__)
for observer in observers:
temp_observable.register(observer)
if isinstance(cls.name_to_class_mapping, dict):
name = config["name"]
unused_config_keys -= {"name"}
target_class = cls.name_to_class_mapping[name]
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{cls.__name__}.name",
name,
source="config_dict",
)
)
else:
# If mapping is not a dict, interpret as type directly
target_class = cls.name_to_class_mapping
name = target_class.__name__
init_method_signature = inspect.signature(target_class)
call_spec_kwargs = dict()
for parameter, param_spec in init_method_signature.parameters.items():
# Sanity check
if (
param_spec.kind == inspect.Parameter.POSITIONAL_ONLY
or param_spec.kind == inspect.Parameter.VAR_KEYWORD
or param_spec.kind == inspect.Parameter.VAR_POSITIONAL
):
if not cls.i_know_that_var_args_are_not_supported:
raise ValueError(
f"Unsupported kind of constructor parameter {parameter}"
)
else:
# Skip the unsupported parameters
continue
# Try to find it
if parameter in kwargs.keys():
# Parameter-given config keys are not "unused", just overridden
unused_config_keys -= {parameter}
param_value = kwargs[parameter]
elif parameter in config.keys():
unused_config_keys -= {parameter}
param_value = config[parameter]
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{target_class.__name__}.{parameter}",
param_value,
source="config_dict",
)
)
elif f"{name}_userdefaults.{parameter}" in config.keys():
param_value = config[f"{name}_userdefaults.{parameter}"]
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{target_class.__name__}.{parameter}",
param_value,
source="userdefaults",
)
)
elif param_spec.default != inspect.Signature.empty:
param_value = param_spec.default
temp_observable.notify(
instrumentation.ConfigMessage(
cls.__name__,
f"{target_class.__name__}.{parameter}",
param_value,
source="default",
)
)
else:
raise ValueError(
f"Could not find a value for constructor parameter {parameter}"
)
call_spec_kwargs[parameter] = param_value
# Call constructor
instance = target_class(**call_spec_kwargs)
# Register observers if possible
if isinstance(instance, instrumentation.Observable):
for observer in observers:
instance.register(observer)
# Warn about unused config keys
for unused_config_key in unused_config_keys:
temp_observable.log_warning(
f"Config key {target_class.__name__}.{unused_config_key} unused"
)
for observer in observers:
temp_observable.unregister(observer)
return instance
class ContainerFactory(Factory):
@classmethod
def create(cls, config: dict, **kwargs):
name = config["name"]
target_class = cls.name_to_class_mapping[name]
return target_class(config, **kwargs)
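# A usage sketch of the config-driven construction above: Factory.create picks
# the target class via name_to_class_mapping["name"], then fills each
# constructor parameter from explicit kwargs, the config dict,
# "<name>_userdefaults.<param>" keys, or the parameter default, in that order.
# The _Shouting/_Quiet/_GreeterFactory classes below are hypothetical and exist
# only for this illustration; they are not part of chia.
if __name__ == "__main__":

    class _Shouting:
        def __init__(self, person, punctuation="!"):
            self.person = person
            self.punctuation = punctuation

        def greet(self):
            return f"HELLO {self.person.upper()}{self.punctuation}"

    class _Quiet:
        def __init__(self, person):
            self.person = person

        def greet(self):
            return f"hello {self.person}"

    class _GreeterFactory(Factory):
        name_to_class_mapping = {"shouting": _Shouting, "quiet": _Quiet}

    # "person" is resolved from the config dict, "punctuation" falls back to its default.
    greeter = _GreeterFactory.create({"name": "shouting", "person": "world"})
    print(greeter.greet())  # HELLO WORLD!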
|
nilq/baby-python
|
python
|
from useintest.modules.consul.consul import ConsulServiceController, consul_service_controllers, \
Consul1_0_0ServiceController, Consul0_8_4ServiceController, ConsulDockerisedService
|
nilq/baby-python
|
python
|
import math
# S1: A quick brown dog jumps over the lazy fox.
# S2: A quick brown fox jumps over the lazy dog.
# Using the two sentences above, word_order computes a similarity score from
# the positions of the matching words in each sentence.
def magnitude(v1):
vResult = [abs(a * b) for a, b in zip(v1, v1)]
mag = math.sqrt(sum(vResult, 0))
return mag
def word_order(s1, s2):
c1 = str.split(s1[:-1].lower())
c2 = str.split(s2[:-1].lower())
v1 = list(range(1,len(c1)+1))
v2 = list()
for word in range(len((c1))):
for val in range(len(c2)):
if(c1[word] == c2[val]):
v2.append(val+1)
vResult = [abs(a - b) for a, b in zip(v1, v2)]
vResult2 = [abs(a * b) for a, b in zip(v1, v2)]
mag1 = magnitude(vResult)
mag2 = magnitude(vResult2)
if(mag2 != 0):
val = mag1 / mag2
elif((mag1 + mag2) == 0):
val = 1
else:
val = 0
return val
def isclose(a, b, rel_tol=1e-04, abs_tol=0.0):
return abs(a-b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)
def test_word_order():
s1 = "A quick brown dog jumps over the lazy fox."
s2 = "A quick brown fox jumps over the lazy dog."
s3 = "A quick brown cat jumps over the lazy dog."
s4 = "The fat bird runs across a green bog."
s5 = "Big fat bird runs across an orange bog."
s6 = "Big fat bird is an orange bog."
assert isclose(0.067091, word_order(s1, s2))
assert isclose(0.000000, word_order(s1, s1))
assert isclose(0.806225, word_order(s3, s4))
assert isclose(1, word_order(s3, s5))
assert isclose(1, word_order(s3, s6))
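# A short worked illustration of the calculation above: between s1 and s2 only
# "dog" (position 4 vs 9) and "fox" (position 9 vs 4) move, so the position-
# difference vector is mostly zeros and the ratio of magnitudes stays small.
if __name__ == "__main__":
    s1 = "A quick brown dog jumps over the lazy fox."
    s2 = "A quick brown fox jumps over the lazy dog."
    print(word_order(s1, s2))   # ~0.0671
    print(word_order(s1, s1))   # 0.0 for identical sentences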
|
nilq/baby-python
|
python
|
import time
import asyncio
from typing import List
import threading
import numpy as np
import spotipy
from spotipy.oauth2 import SpotifyOAuth
from eventhook import EventHook
class NotPlayingError(Exception):
def __init__(self):
self.message = "Spotify not playing"
class MonitorConfig:
def __init__(
self,
refresh_accuracy_seconds: float = 1.0,
refresh_max_delay_seconds: float = 30.0,
refresh_next_event_divisor: float = 1.5,
not_playing_refresh_seconds: float = 5.0,
tick_accuracy_seconds: float = 0.25,
tick_max_delay_seconds: float = 10.0,
tick_next_event_divisor: float = 2.0,
section_offset_seconds: float = 0.25,
):
self.refresh_accuracy_seconds = refresh_accuracy_seconds
self.refresh_max_delay_seconds = refresh_max_delay_seconds
self.refresh_next_event_divisor = refresh_next_event_divisor
self.not_playing_refresh_seconds = not_playing_refresh_seconds
self.tick_accuracy_seconds = tick_accuracy_seconds
self.tick_max_delay_seconds = tick_max_delay_seconds
self.tick_next_event_divisor = tick_next_event_divisor
self.section_offset_seconds = section_offset_seconds
class spotifyMonitor:
def __init__(
self,
config: MonitorConfig = MonitorConfig(),
debug: bool = False,
) -> None:
self.config = config
self.sp = self._generate_spotify_auth()
self.on_track_change = EventHook()
self.on_section_change = EventHook()
self.on_stop = EventHook()
self.current_track = {"id": None, "progress": 0.0, "sections": []}
self.current_section = {"id": None, "track_id": None}
self.next_section = {"id": None, "track_id": None}
self._loop = asyncio.get_event_loop()
self._last_tick = self._get_tick_time()
self.debug = debug
self._ticking = False
self._playing = True
def start(self):
try:
self._loop.call_soon(self._refresh)
self._loop.run_forever()
finally:
self._loop.run_until_complete(self._loop.shutdown_asyncgens())
self._loop.close()
def stop(self):
self._loop.stop()
def _generate_spotify_auth(self) -> spotipy.Spotify:
scope = "user-read-playback-state"
return spotipy.Spotify(
auth_manager=SpotifyOAuth(
scope=scope,
client_id="397df7bde7e64245bf93014ce0d36b4f",
client_secret="5d7d498988714957990b45afa47fdd36",
redirect_uri="http://127.0.0.1:9090",
)
)
def _refresh(self):
try:
self._refresh_track_status()
self._playing = True
if self.debug:
print(" Refresh {}".format(self.current_track["progress"]))
if self._ticking == False:
self._last_tick = self._get_tick_time()
self._ticking = True
self._loop.call_soon(self._tick)
delay = (
self.current_track["duration"] - self.current_track["progress"]
) / self.config.refresh_next_event_divisor
if delay > self.config.refresh_max_delay_seconds:
delay = self.config.refresh_max_delay_seconds
elif delay < self.config.refresh_accuracy_seconds:
delay = self.config.refresh_accuracy_seconds
except NotPlayingError:
if self._playing:
self._playing = False
self.on_stop.fire()
delay = self.config.not_playing_refresh_seconds
if self.debug:
print(" Refresh (not playing)")
self._loop.call_later(delay=delay, callback=self._refresh)
def _tick(self):
if self._playing:
this_tick = self._get_tick_time()
self.current_track["progress"] += (this_tick - self._last_tick) / 1000
self._last_tick = this_tick
if self.debug:
print(" Tick {}".format(self.current_track["progress"]))
current_section_id = self._calculate_current_section_id(self.current_track)
if current_section_id != self.current_section["id"]:
section_info = self._calculate_section_info(
self.current_track, current_section_id
)
self._trigger_section_change(self.current_track, section_info)
self.current_section = section_info["current_section"]
self.next_section = section_info["next_section"]
delay = (
self.next_section["start"] - self.current_track["progress"]
) / self.config.tick_next_event_divisor
if delay > self.config.tick_max_delay_seconds:
delay = self.config.tick_max_delay_seconds
elif delay < self.config.tick_accuracy_seconds:
delay = self.next_section["start"] - self.current_track["progress"]
if delay < 0:
delay = self.config.tick_accuracy_seconds
self._loop.call_later(delay=delay, callback=self._tick)
else:
self._ticking = False
def _get_tick_time(self) -> float:
return time.time_ns() // 1000000
def _refresh_track_status(self):
current_track = self._get_current_track_status()
track_change = self.current_track["id"] != current_track["id"]
section_info = self._calculate_section_info(current_track)
section_change = (
self.current_section["id"] != section_info["current_section"]["id"]
or self.current_section["track_id"]
!= section_info["current_section"]["track_id"]
)
if track_change:
self._trigger_track_change(current_track, section_info)
elif section_change:
self._trigger_section_change(current_track, section_info)
self.current_track = current_track
self._last_tick = self._get_tick_time()
self.current_section = section_info["current_section"]
self.next_section = section_info["next_section"]
def _trigger_track_change(self, track, section_info):
nth = threading.Thread(
target=self.on_track_change.fire(
previous_track=self.current_track,
current_track=track,
current_section=section_info["current_section"],
next_section=section_info["next_section"],
)
)
nth.start()
def _trigger_section_change(self, track, section_info):
nth = threading.Thread(
target=self.on_section_change.fire(
current_track=track,
current_section=section_info["current_section"],
next_section=section_info["next_section"],
)
)
nth.start()
def _get_current_track_status(self) -> dict:
track = self._get_spotify_currently_playing()
if track["id"] != self.current_track["id"]:
track_info = self._get_spotify_track_info(track_id=track["id"])
track_features = self._get_spotify_track_features(track_id=track["id"])
current_track = {**track, **track_info, **track_features}
else:
current_track = self.current_track
current_track["progress"] = track["progress"]
return current_track
def _calculate_section_info(self, track, current_section_id: int = None) -> dict:
if not current_section_id:
current_section_id = self._calculate_current_section_id(track)
track_sections = track["sections"]
section = {
**{"id": current_section_id, "track_id": track["id"]},
**track_sections[current_section_id],
}
if current_section_id + 1 < len(track_sections):
next_section = track_sections[current_section_id + 1]
else:
next_section = {
"id": 0,
"track_id": None,
"tempo": None,
"loudness": None,
"start": track["duration"],
}
return {"current_section": section, "next_section": next_section}
def _calculate_current_section_id(self, track) -> int:
current_section_id = 0
for index, section in enumerate(track["sections"]):
if section["start"] < track["progress"]:
current_section_id = index
if section["start"] > track["progress"]:
break
return current_section_id
def _get_spotify_currently_playing(self) -> dict:
# print(" CALL to currently_playing")
try:
result = self.sp.currently_playing()
if result:
if result["is_playing"]:
return {
"id": result["item"]["id"],
"name": result["item"]["name"],
"artist": result["item"]["artists"][0]["name"],
"duration": result["item"]["duration_ms"] / 1000,
"progress": result["progress_ms"] / 1000,
}
else:
raise NotPlayingError
else:
raise NotPlayingError
# FIXME - Add 401 error here
except ValueError:
return {
"id": None,
"name": None,
"artist": None,
"duration": None,
"progress": None,
}
def _get_spotify_track_info(self, track_id) -> dict:
# print(" CALL to audio_analysis")
try:
result = self.sp.audio_analysis(track_id=track_id)
for section in result["sections"]:
section["start"] = section["start"] - self.config.section_offset_seconds
loudnesses = [
section["loudness"]
for section in result["sections"]
if "loudness" in section
]
return {
"id": track_id,
"duration": result["track"]["duration"],
"tempo": result["track"]["tempo"],
"loudness": result["track"]["loudness"],
"key": result["track"]["key"],
"sections": result["sections"],
"sections_loudness_mean": np.mean(loudnesses),
"sections_loudness_upperq": np.quantile(loudnesses, 0.75),
}
# FIXME - Add 401 error here
except ValueError:
return {"tempo": None, "loudness": None, "sections": List()}
def _get_spotify_track_features(self, track_id) -> dict:
try:
result = self.sp.audio_features(tracks=[track_id])
return {
"danceability": result[0]["danceability"],
"energy": result[0]["energy"],
"key": result[0]["key"],
"loudness": result[0]["loudness"],
"speechiness": result[0]["speechiness"],
"acousticness": result[0]["acousticness"],
"instrumentalness": result[0]["instrumentalness"],
"liveness": result[0]["liveness"],
"valence": result[0]["valence"],
"tempo": result[0]["tempo"],
"time_signature": result[0]["time_signature"],
}
# FIXME - Add 401 error here
except ValueError:
return {"tempo": None, "loudness": None, "sections": List()}
def _get_playlist(self, playlist_id) -> dict:
try:
result = self.sp.playlist(playlist_id=playlist_id)
tracks = []
for item in result["tracks"]["items"]:
tracks.append(
{
"playlist_name": result["name"],
"playlist_id": result["id"],
"id": item["track"]["id"],
"name": item["track"]["name"],
"duration": item["track"]["duration_ms"] / 1000,
}
)
return tracks
except ValueError:
return []
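# A minimal usage sketch: build a monitor with slightly faster polling than the
# defaults and let the built-in debug prints show refresh/tick progress. This
# assumes an active Spotify playback session and that the OAuth credentials in
# _generate_spotify_auth are valid; handlers would normally be attached to
# on_track_change / on_section_change through whatever registration EventHook
# exposes (not shown here).
if __name__ == "__main__":
    demo_config = MonitorConfig(refresh_max_delay_seconds=15.0, tick_max_delay_seconds=5.0)
    monitor = spotifyMonitor(config=demo_config, debug=True)
    try:
        monitor.start()
    except KeyboardInterrupt:
        pass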
|
nilq/baby-python
|
python
|
a, b = input().split()
print("Yes" if (int(a + b) ** (1 / 2)).is_integer() else "No")
|
nilq/baby-python
|
python
|
# pip3 install PySocks
import socks
import socket
from urllib import request
from urllib.error import URLError
socks.set_default_proxy(socks.SOCKS5, '127.0.0.1', 9742)
socket.socket = socks.socksocket
try:
response = request.urlopen('http://httpbin.org/get')
print(response.read().decode('utf-8'))
except URLError as e:
print(e.reason)
|
nilq/baby-python
|
python
|
import torch
from kondo import Spec
from torchrl.experiments import BaseExperiment
from torchrl.utils.storage import TransitionTupleDataset
from torchrl.contrib.controllers import DDPGController
class DDPGExperiment(BaseExperiment):
def __init__(self, actor_lr=1e-4, critic_lr=1e-3, gamma=0.99,
tau=1e-2, batch_size=32, buffer_size=1000,
n_ou_reset_interval=100000, **kwargs):
self._controller_args = dict(
actor_lr=actor_lr,
critic_lr=critic_lr,
gamma=gamma,
tau=tau,
n_reset_interval=n_ou_reset_interval,
)
self.buffer = TransitionTupleDataset(size=buffer_size)
self.batch_size = batch_size
super().__init__(**kwargs)
def store(self, transition_list):
self.buffer.extend(transition_list)
def build_controller(self):
return DDPGController(self.envs.observation_space.shape[0],
self.envs.action_space.shape[0],
self.envs.action_space.low,
self.envs.action_space.high,
**self._controller_args,
device=self.device)
def train(self):
if len(self.buffer) < self.batch_size:
return {}
b_idx = torch.randperm(len(self.buffer))[:self.batch_size]
b_transition = [b.to(self.device) for b in self.buffer[b_idx]]
return self.controller.learn(*b_transition)
@staticmethod
def spec_list():
return [
Spec(
group='ddpg',
params=dict(
env_id=['Pendulum-v0'],
gamma=.99,
n_train_interval=1,
n_frames=30000,
batch_size=128,
buffer_size=int(1e6),
actor_lr=1e-4,
critic_lr=1e-3,
tau=1e-2,
# n_ou_reset_interval=10000,
# ou_mu = 0.0
# ou_theta = 0.15
# ou_sigma = 0.2
),
exhaustive=True
)
]
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from numap import NuMap
def hello_world(element, *args, **kwargs):
print "Hello element: %s " % element,
print "Hello args: %s" % (args,),
print "Hello kwargs: %s" % (kwargs,)
return element
ELEMENTS = ('element_0', 'element_1', 'element_2', 'element_3', 'element_4')
result_iterator = NuMap(hello_world, ELEMENTS,
args=('arg_0', 'arg_1'),
kwargs={'kwarg_0':'val_0', 'kwarg_1':'val_1'})
results = tuple(result_iterator)
assert results == ('element_0', 'element_1', 'element_2', 'element_3', 'element_4')
|
nilq/baby-python
|
python
|
import collections
import itertools
import json
from pathlib import Path
import re
import sqlite3
import string
import attr
import nltk
import numpy as np
def clamp(value, abs_max):
value = max(-abs_max, value)
value = min(abs_max, value)
return value
def to_dict_with_sorted_values(d, key=None):
return {k: sorted(v, key=key) for k, v in d.items()}
@attr.s
class SpiderItem:
text = attr.ib()
code = attr.ib()
schema = attr.ib()
orig = attr.ib()
orig_schema = attr.ib()
@attr.s
class Column:
id = attr.ib()
table = attr.ib()
name = attr.ib()
unsplit_name = attr.ib()
orig_name = attr.ib()
type = attr.ib()
foreign_key_for = attr.ib(default=None)
@attr.s
class Table:
id = attr.ib()
name = attr.ib()
unsplit_name = attr.ib()
orig_name = attr.ib()
columns = attr.ib(factory=list)
primary_keys = attr.ib(factory=list)
@attr.s
class Schema:
db_id = attr.ib()
tables = attr.ib()
columns = attr.ib()
foreign_key_graph = attr.ib()
orig = attr.ib()
connection = attr.ib(default=None)
@attr.s
class PreprocessedSchema:
column_names = attr.ib(factory=list)
table_names = attr.ib(factory=list)
table_bounds = attr.ib(factory=list)
column_to_table = attr.ib(factory=dict)
table_to_columns = attr.ib(factory=dict)
foreign_keys = attr.ib(factory=dict)
foreign_keys_tables = attr.ib(factory=lambda: collections.defaultdict(set))
primary_keys = attr.ib(factory=list)
STOPWORDS = set(nltk.corpus.stopwords.words("english"))
PUNKS = set(a for a in string.punctuation)
class EncPreproc:
# def __init__(self) -> None:
def __init__(
self,
tables_file,
dataset_path,
include_table_name_in_column,
fix_issue_16_primary_keys,
qq_max_dist,
cc_max_dist,
tt_max_dist,
):
self._tables_file = tables_file
self._dataset_path = dataset_path
self.include_table_name_in_column = include_table_name_in_column
self.fix_issue_16_primary_keys = fix_issue_16_primary_keys
self.texts = collections.defaultdict(list)
self.counted_db_ids = set()
self.preprocessed_schemas = {}
self.qq_max_dist = qq_max_dist
self.cc_max_dist = cc_max_dist
self.tt_max_dist = tt_max_dist
self.relation_ids = {}
def add_relation(name):
self.relation_ids[name] = len(self.relation_ids)
def add_rel_dist(name, max_dist):
for i in range(-max_dist, max_dist + 1):
add_relation((name, i))
add_rel_dist("qq_dist", qq_max_dist)
add_rel_dist("cc_dist", cc_max_dist)
add_rel_dist("tt_dist", tt_max_dist)
rel_names = [
"qc_default",
"qt_default",
"cq_default",
"cc_default",
"cc_foreign_key_forward",
"cc_foreign_key_backward",
"cc_table_match",
"ct_default",
"ct_foreign_key",
"ct_primary_key",
"ct_table_match",
"ct_any_table",
"tq_default",
"tc_default",
"tc_primary_key",
"tc_table_match",
"tc_any_table",
"tc_foreign_key",
"tt_default",
"tt_foreign_key_forward",
"tt_foreign_key_backward",
"tt_foreign_key_both",
"qcCEM",
"cqCEM",
"qtTEM",
"tqTEM",
"qcCPM",
"cqCPM",
"qtTPM",
"tqTPM",
"qcNUMBER",
"cqNUMBER",
"qcTIME",
"cqTIME",
"qcCELLMATCH",
"cqCELLMATCH",
]
for rel in rel_names:
add_relation(rel)
self.schemas = None
self.eval_foreign_key_maps = None
print("before load_trees")
self.schemas, self.eval_foreign_key_maps = self.load_tables([self._tables_file])
print("before connecting")
for db_id, schema in self.schemas.items():
sqlite_path = Path(self._dataset_path) / db_id / f"{db_id}.sqlite"
source: sqlite3.Connection
with sqlite3.connect(sqlite_path) as source:
dest = sqlite3.connect(":memory:")
dest.row_factory = sqlite3.Row
source.backup(dest)
schema.connection = dest
def get_desc(self, tokenized_utterance, db_id):
item = SpiderItem(
text=[x.text for x in tokenized_utterance[1:-1]],
code=None,
schema=self.schemas[db_id],
orig=None,
orig_schema=self.schemas[db_id].orig,
)
return self.preprocess_item(item, "train")
def compute_relations(
self, desc, enc_length, q_enc_length, c_enc_length, c_boundaries, t_boundaries
):
sc_link = desc.get("sc_link", {"q_col_match": {}, "q_tab_match": {}})
cv_link = desc.get("cv_link", {"num_date_match": {}, "cell_match": {}})
# Catalogue which things are where
loc_types = {}
for i in range(q_enc_length):
loc_types[i] = ("question",)
c_base = q_enc_length
for c_id, (c_start, c_end) in enumerate(zip(c_boundaries, c_boundaries[1:])):
for i in range(c_start + c_base, c_end + c_base):
loc_types[i] = ("column", c_id)
t_base = q_enc_length + c_enc_length
for t_id, (t_start, t_end) in enumerate(zip(t_boundaries, t_boundaries[1:])):
for i in range(t_start + t_base, t_end + t_base):
loc_types[i] = ("table", t_id)
relations = np.empty((enc_length, enc_length), dtype=np.int64)
for i, j in itertools.product(range(enc_length), repeat=2):
def set_relation(name):
relations[i, j] = self.relation_ids[name]
i_type, j_type = loc_types[i], loc_types[j]
if i_type[0] == "question":
if j_type[0] == "question":
set_relation(("qq_dist", clamp(j - i, self.qq_max_dist)))
elif j_type[0] == "column":
# set_relation('qc_default')
j_real = j - c_base
if f"{i},{j_real}" in sc_link["q_col_match"]:
set_relation("qc" + sc_link["q_col_match"][f"{i},{j_real}"])
elif f"{i},{j_real}" in cv_link["cell_match"]:
set_relation("qc" + cv_link["cell_match"][f"{i},{j_real}"])
elif f"{i},{j_real}" in cv_link["num_date_match"]:
set_relation("qc" + cv_link["num_date_match"][f"{i},{j_real}"])
else:
set_relation("qc_default")
elif j_type[0] == "table":
j_real = j - t_base
if f"{i},{j_real}" in sc_link["q_tab_match"]:
set_relation("qt" + sc_link["q_tab_match"][f"{i},{j_real}"])
else:
set_relation("qt_default")
elif i_type[0] == "column":
if j_type[0] == "question":
i_real = i - c_base
if f"{j},{i_real}" in sc_link["q_col_match"]:
set_relation("cq" + sc_link["q_col_match"][f"{j},{i_real}"])
elif f"{j},{i_real}" in cv_link["cell_match"]:
set_relation("cq" + cv_link["cell_match"][f"{j},{i_real}"])
elif f"{j},{i_real}" in cv_link["num_date_match"]:
set_relation("cq" + cv_link["num_date_match"][f"{j},{i_real}"])
else:
set_relation("cq_default")
elif j_type[0] == "column":
col1, col2 = i_type[1], j_type[1]
if col1 == col2:
set_relation(("cc_dist", clamp(j - i, self.cc_max_dist)))
else:
set_relation("cc_default")
if desc["foreign_keys"].get(str(col1)) == col2:
set_relation("cc_foreign_key_forward")
if desc["foreign_keys"].get(str(col2)) == col1:
set_relation("cc_foreign_key_backward")
if (
desc["column_to_table"][str(col1)]
== desc["column_to_table"][str(col2)]
):
set_relation("cc_table_match")
elif j_type[0] == "table":
col, table = i_type[1], j_type[1]
set_relation("ct_default")
if self.match_foreign_key(desc, col, table):
set_relation("ct_foreign_key")
col_table = desc["column_to_table"][str(col)]
if col_table == table:
if col in desc["primary_keys"]:
set_relation("ct_primary_key")
else:
set_relation("ct_table_match")
elif col_table is None:
set_relation("ct_any_table")
elif i_type[0] == "table":
if j_type[0] == "question":
i_real = i - t_base
if f"{j},{i_real}" in sc_link["q_tab_match"]:
set_relation("tq" + sc_link["q_tab_match"][f"{j},{i_real}"])
else:
set_relation("tq_default")
elif j_type[0] == "column":
table, col = i_type[1], j_type[1]
set_relation("tc_default")
if self.match_foreign_key(desc, col, table):
set_relation("tc_foreign_key")
col_table = desc["column_to_table"][str(col)]
if col_table == table:
if col in desc["primary_keys"]:
set_relation("tc_primary_key")
else:
set_relation("tc_table_match")
elif col_table is None:
set_relation("tc_any_table")
elif j_type[0] == "table":
table1, table2 = i_type[1], j_type[1]
if table1 == table2:
set_relation(("tt_dist", clamp(j - i, self.tt_max_dist)))
else:
set_relation("tt_default")
forward = table2 in desc["foreign_keys_tables"].get(
str(table1), ()
)
backward = table1 in desc["foreign_keys_tables"].get(
str(table2), ()
)
if forward and backward:
set_relation("tt_foreign_key_both")
elif forward:
set_relation("tt_foreign_key_forward")
elif backward:
set_relation("tt_foreign_key_backward")
return relations
@classmethod
def match_foreign_key(cls, desc, col, table):
foreign_key_for = desc["foreign_keys"].get(str(col))
if foreign_key_for is None:
return False
foreign_table = desc["column_to_table"][str(foreign_key_for)]
return desc["column_to_table"][str(col)] == foreign_table
def validate_item(self, item, section):
return True, None
def preprocess_item(self, item, validation_info):
question, question_for_copying = item.text, item.text
question = [x.replace("Ġ", "") for x in question]
question_for_copying = [x.replace("Ġ", "") for x in question_for_copying]
preproc_schema = self._preprocess_schema(item.schema)
assert preproc_schema.column_names[0][0].startswith("<type:")
column_names_without_types = [col[1:] for col in preproc_schema.column_names]
sc_link = self.compute_schema_linking(
question, column_names_without_types, preproc_schema.table_names
)
# print(sc_link)
cv_link = self.compute_cell_value_linking(question, item.schema)
# if cv_link['cell_match']:
# print(question)
return {
"raw_question": question,
"question": question,
"question_for_copying": question_for_copying,
"db_id": item.schema.db_id,
"sc_link": sc_link,
"cv_link": cv_link,
"columns": preproc_schema.column_names,
"tables": preproc_schema.table_names,
"table_bounds": preproc_schema.table_bounds,
"column_to_table": preproc_schema.column_to_table,
"table_to_columns": preproc_schema.table_to_columns,
"foreign_keys": preproc_schema.foreign_keys,
"foreign_keys_tables": preproc_schema.foreign_keys_tables,
"primary_keys": preproc_schema.primary_keys,
}
def _preprocess_schema(self, schema):
if schema.db_id in self.preprocessed_schemas:
return self.preprocessed_schemas[schema.db_id]
result = self.preprocess_schema_uncached(
schema,
self._tokenize,
self.include_table_name_in_column,
self.fix_issue_16_primary_keys,
)
self.preprocessed_schemas[schema.db_id] = result
return result
def _tokenize(self, presplit, unsplit):
return presplit
def _tokenize_for_copying(self, presplit, unsplit):
return presplit, presplit
# schema linking, similar to IRNet
@classmethod
def compute_schema_linking(cls, question, column, table):
def partial_match(x_list, y_list):
x_str = " ".join(x_list)
y_str = " ".join(y_list)
if x_str in STOPWORDS or x_str in PUNKS:
return False
if re.match(rf"\b{re.escape(x_str)}\b", y_str):
assert x_str in y_str
return True
else:
return False
def exact_match(x_list, y_list):
x_str = " ".join(x_list)
y_str = " ".join(y_list)
if x_str == y_str:
return True
else:
return False
q_col_match = dict()
q_tab_match = dict()
col_id2list = dict()
for col_id, col_item in enumerate(column):
if col_id == 0:
continue
col_id2list[col_id] = col_item
tab_id2list = dict()
for tab_id, tab_item in enumerate(table):
tab_id2list[tab_id] = tab_item
# 5-gram
n = 5
while n > 0:
for i in range(len(question) - n + 1):
n_gram_list = question[i : i + n]
n_gram = " ".join(n_gram_list)
if len(n_gram.strip()) == 0:
continue
# exact match case
for col_id in col_id2list:
if exact_match(n_gram_list, col_id2list[col_id]):
for q_id in range(i, i + n):
q_col_match[f"{q_id},{col_id}"] = "CEM"
for tab_id in tab_id2list:
if exact_match(n_gram_list, tab_id2list[tab_id]):
for q_id in range(i, i + n):
q_tab_match[f"{q_id},{tab_id}"] = "TEM"
# partial match case
for col_id in col_id2list:
if partial_match(n_gram_list, col_id2list[col_id]):
for q_id in range(i, i + n):
if f"{q_id},{col_id}" not in q_col_match:
q_col_match[f"{q_id},{col_id}"] = "CPM"
for tab_id in tab_id2list:
if partial_match(n_gram_list, tab_id2list[tab_id]):
for q_id in range(i, i + n):
if f"{q_id},{tab_id}" not in q_tab_match:
q_tab_match[f"{q_id},{tab_id}"] = "TPM"
n -= 1
return {"q_col_match": q_col_match, "q_tab_match": q_tab_match}
@classmethod
def load_tables(cls, paths):
schemas = {}
eval_foreign_key_maps = {}
for path in paths:
schema_dicts = json.load(open(path))
for schema_dict in schema_dicts:
tables = tuple(
Table(
id=i,
name=name.split(),
unsplit_name=name,
orig_name=orig_name,
)
for i, (name, orig_name) in enumerate(
zip(
schema_dict["table_names"],
schema_dict["table_names_original"],
)
)
)
columns = tuple(
Column(
id=i,
table=tables[table_id] if table_id >= 0 else None,
name=col_name.split(),
unsplit_name=col_name,
orig_name=orig_col_name,
type=col_type,
)
for i, (
(table_id, col_name),
(_, orig_col_name),
col_type,
) in enumerate(
zip(
schema_dict["column_names"],
schema_dict["column_names_original"],
schema_dict["column_types"],
)
)
)
# Link columns to tables
for column in columns:
if column.table:
column.table.columns.append(column)
for column_id in schema_dict["primary_keys"]:
# Register primary keys
column = columns[column_id]
column.table.primary_keys.append(column)
foreign_key_graph = None
for source_column_id, dest_column_id in schema_dict["foreign_keys"]:
# Register foreign keys
source_column = columns[source_column_id]
dest_column = columns[dest_column_id]
source_column.foreign_key_for = dest_column
db_id = schema_dict["db_id"]
assert db_id not in schemas
schemas[db_id] = Schema(
db_id, tables, columns, foreign_key_graph, schema_dict
)
eval_foreign_key_maps[db_id] = cls.build_foreign_key_map(schema_dict)
for db_id, schema_el in schemas.items():
san2orig = {}
orig2san = {}
for table_el in schema_el.tables:
sanitized_name = f"{'_'.join(table_el.name)}".lower()
orig_name = f"{table_el.orig_name}".lower()
san2orig[sanitized_name] = orig_name
orig2san[orig_name] = sanitized_name
for col_el in table_el.columns:
sanitized_name = (
f"{'_'.join(col_el.table.name)}.{'_'.join(col_el.name)}".lower()
)
orig_name = f"{col_el.table.orig_name}.{col_el.orig_name}".lower()
san2orig[sanitized_name] = orig_name
orig2san[orig_name] = sanitized_name
schema_el.san2orig = san2orig
schema_el.orig2san = orig2san
return schemas, eval_foreign_key_maps
@classmethod
def build_foreign_key_map(cls, entry):
cols_orig = entry["column_names_original"]
tables_orig = entry["table_names_original"]
# rebuild cols corresponding to idmap in Schema
cols = []
for col_orig in cols_orig:
if col_orig[0] >= 0:
t = tables_orig[col_orig[0]]
c = col_orig[1]
cols.append("__" + t.lower() + "." + c.lower() + "__")
else:
cols.append("__all__")
def keyset_in_list(k1, k2, k_list):
for k_set in k_list:
if k1 in k_set or k2 in k_set:
return k_set
new_k_set = set()
k_list.append(new_k_set)
return new_k_set
foreign_key_list = []
foreign_keys = entry["foreign_keys"]
for fkey in foreign_keys:
key1, key2 = fkey
key_set = keyset_in_list(key1, key2, foreign_key_list)
key_set.add(key1)
key_set.add(key2)
foreign_key_map = {}
for key_set in foreign_key_list:
sorted_list = sorted(list(key_set))
midx = sorted_list[0]
for idx in sorted_list:
foreign_key_map[cols[idx]] = cols[midx]
return foreign_key_map
@classmethod
def compute_cell_value_linking(cls, tokens, schema):
def isnumber(word):
try:
float(word)
return True
except (TypeError, ValueError):
return False
def db_word_match(word, column, table, db_conn):
# return False #fixme
cursor = db_conn.cursor()
word = word.replace("'", "")
p_str = (
f"select {column} from {table} where {column} like '{word} %' or {column} like '% {word}' or "
f"{column} like '% {word} %' or {column} like '{word}'"
)
# return False # TODO: fixmes
# print("hi")
try:
cursor.execute(p_str)
p_res = cursor.fetchall()
if len(p_res) == 0:
return False
else:
return p_res
except sqlite3.OperationalError as e:
# print(p_str)
return False
num_date_match = {}
cell_match = {}
for q_id, word in enumerate(tokens):
if len(word.strip()) == 0:
continue
if word in STOPWORDS or word in PUNKS:
continue
num_flag = isnumber(word)
CELL_MATCH_FLAG = "CELLMATCH"
for col_id, column in enumerate(schema.columns):
if col_id == 0:
assert column.orig_name == "*"
continue
# word is number
if num_flag:
if column.type in ["number", "time"]: # TODO fine-grained date
num_date_match[f"{q_id},{col_id}"] = column.type.upper()
else:
ret = db_word_match(
word,
column.orig_name,
column.table.orig_name,
schema.connection,
)
if ret:
# print(word, ret)
cell_match[f"{q_id},{col_id}"] = CELL_MATCH_FLAG
cv_link = {"num_date_match": num_date_match, "cell_match": cell_match}
return cv_link
@classmethod
def preprocess_schema_uncached(
cls,
schema,
tokenize_func,
include_table_name_in_column,
fix_issue_16_primary_keys,
):
r = PreprocessedSchema()
last_table_id = None
for i, column in enumerate(schema.columns):
col_toks = tokenize_func(column.name, column.unsplit_name)
# assert column.type in ["text", "number", "time", "boolean", "others"]
type_tok = f"<type: {column.type}>"
column_name = [type_tok] + col_toks
if include_table_name_in_column:
if column.table is None:
table_name = ["<any-table>"]
else:
table_name = tokenize_func(
column.table.name, column.table.unsplit_name
)
column_name += ["<table-sep>"] + table_name
r.column_names.append(column_name)
table_id = None if column.table is None else column.table.id
r.column_to_table[str(i)] = table_id
if table_id is not None:
columns = r.table_to_columns.setdefault(str(table_id), [])
columns.append(i)
if last_table_id != table_id:
r.table_bounds.append(i)
last_table_id = table_id
if column.foreign_key_for is not None:
r.foreign_keys[str(column.id)] = column.foreign_key_for.id
r.foreign_keys_tables[str(column.table.id)].add(
column.foreign_key_for.table.id
)
r.table_bounds.append(len(schema.columns))
assert len(r.table_bounds) == len(schema.tables) + 1
for i, table in enumerate(schema.tables):
table_toks = tokenize_func(table.name, table.unsplit_name)
r.table_names.append(table_toks)
last_table = schema.tables[-1]
r.foreign_keys_tables = to_dict_with_sorted_values(r.foreign_keys_tables)
r.primary_keys = (
[column.id for table in schema.tables for column in table.primary_keys]
if fix_issue_16_primary_keys
else [
column.id
for column in last_table.primary_keys
for table in schema.tables
]
)
return r
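# ------------------------------------------------------------------
# Hedged illustration (not part of the original file): the input format
# expected by build_foreign_key_map and the mapping it produces for a toy
# Spider-style schema entry. The enclosing class is not shown in this
# excerpt, so the classmethod is only described here, not called.
if __name__ == "__main__":
    toy_entry = {
        "table_names_original": ["singer", "concert"],
        "column_names_original": [
            [-1, "*"],          # index 0: the special "all columns" entry
            [0, "singer_id"],   # index 1: singer.singer_id
            [1, "singer_id"],   # index 2: concert.singer_id
        ],
        "foreign_keys": [[2, 1]],  # concert.singer_id references singer.singer_id
    }
    # Calling build_foreign_key_map on whatever class defines it would map every
    # column in a foreign-key group to the group's lowest column index:
    #   {'__concert.singer_id__': '__singer.singer_id__',
    #    '__singer.singer_id__': '__singer.singer_id__'}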
|
nilq/baby-python
|
python
|
# SPDX-FileCopyrightText: 2022 Eva Herrada for Adafruit Industries
# SPDX-License-Identifier: MIT
import board
from kmk.kmk_keyboard import KMKKeyboard as _KMKKeyboard
from kmk.matrix import DiodeOrientation
class KMKKeyboard(_KMKKeyboard):
row_pins = (board.D10, board.MOSI, board.MISO, board.D8)
col_pins = (
board.D4,
board.D7,
board.SCK,
)
diode_orientation = DiodeOrientation.COLUMNS
i2c = board.I2C
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
counter = 100 # An integer assignment
miles = 1000.0 # A floating point
name = "John" # A string
print (counter)
print (miles)
print (name)
|
nilq/baby-python
|
python
|
from .tests import _index
def main():
suites = _index.suites
passes = 0
fails = 0
for s in suites:
s.run()
print(s)
passes += s.passes
fails += s.fails
print(f'###################\nSUMMARY OF ALL TEST SUITES\nTotal Passing Tests: {passes}\nTotal Failing Tests: {fails}\nPercent Passing: {(passes/(passes+fails)) * 100}%')
|
nilq/baby-python
|
python
|
#!/usr/bin/python
# -*- coding: latin-1 -*-
import os, pdb, subprocess
import numpy as np
import GenericUsefulScripts as GUS
from astropy import units as u
from astropy.io import ascii, fits
from astropy.convolution import convolve
from astropy.stats import SigmaClip
from astropy.coordinates import SkyCoord
from photutils.background import MedianBackground, Background2D
from skimage.transform import resize
import multiprocessing
import ChrisFuncs
import pandas as pd
space = ' '
def data_reduction(galaxy_name, path_fits_input = 'standard'):
# ---------------------------------------------------------------------------
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
subtable = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
ra, dec = subtable['ra'].values[0], subtable['dec'].values[0]
ap_cen_coord = SkyCoord(ra*u.deg, dec*u.deg, frame = 'fk5')
semimaj = subtable['semimaj_arcsec'].values[0]
axial_ratio, pos_angle = subtable['axial_ratio'].values[0], subtable['pos_angle'].values[0]
# ---------------------------------------------------------------------------
subprocess.call('mkdir ../'+galaxy_name+'/_ReducedMaps/', shell = True)
list_data = []
if path_fits_input == 'standard': path_fits_input = '../'+galaxy_name+'/Caapr/Temp/Processed_Maps'
else: path_fits_input = '../'+galaxy_name+'/'+path_fits_input
header_fits = '../'+galaxy_name+'/Caapr/Maps/'
print('Reading original maps...')
filelist = [x for x in os.listdir('Caapr/Maps') if x.endswith('.fits')]
for file in filelist:
if file.endswith('Thumbnail.fits'): continue # Don't work with thumbnails
elif file.endswith('Error.fits'): continue # Don't work with Errors
signal_path = path_fits_input+'/'+file
list_data.append(GUS.FitsUtils(signal_path))
print(space+signal_path+' read')
print('...done!')
print()
for data in list_data:
if os.path.exists('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits'):
print(data.bandname+'.fits already reduced, skipping to next band')
continue
else: print('Processing band', data.bandname)
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
centre_x, centre_y = ap_cen_coord.to_pixel(data.wcs)
pixel_scale = (data.get_pixel_scale()*u.deg).to('arcsec').value
Gal_Ap_Stuff = centre_x, centre_y, semimaj/pixel_scale, axial_ratio, pos_angle
# Reduce band
signal_reduced = reduce(data, Gal_Ap_Stuff)
# Save fits
hdu = fits.PrimaryHDU(signal_reduced)
hdu.header = data.hdr
hdu.writeto('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits')
print()
print('Data reduction phase over.')
print()
return
def data_reduction_parallel(galaxy_name, processes = 5, path_fits_input = 'standard'):
from itertools import repeat
# ---------------------------------------------------------------------------
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
subtable = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
ra, dec = subtable['ra'].values[0], subtable['dec'].values[0]
ap_cen_coord = SkyCoord(ra*u.deg, dec*u.deg, frame = 'fk5')
semimaj = subtable['semimaj_arcsec'].values[0]
axial_ratio, pos_angle = subtable['axial_ratio'].values[0], subtable['pos_angle'].values[0]
# ---------------------------------------------------------------------------
subprocess.call('mkdir ../'+galaxy_name+'/_ReducedMaps/', shell = True)
list_data = []
if path_fits_input == 'standard': path_fits_input = '../'+galaxy_name+'/Caapr/Temp/Processed_Maps'
else: path_fits_input = '../'+galaxy_name+'/'+path_fits_input
header_fits = '../'+galaxy_name+'/Caapr/Maps/'
print('Reading original maps...')
filelist = [x for x in os.listdir('Caapr/Maps') if x.endswith('.fits')]
for file in filelist:
if file.endswith('Thumbnail.fits'): continue # Don't work with thumbnails
elif file.endswith('Error.fits'): continue # Don't work with Errors
signal_path = path_fits_input+'/'+file
list_data.append(GUS.FitsUtils(signal_path))
print(space+signal_path+' read')
print('...done!')
print()
with multiprocessing.Pool(processes=processes) as pool:
func = zip(list_data, repeat(galaxy_name), \
repeat(ap_cen_coord), repeat(semimaj), repeat(axial_ratio), repeat(pos_angle))
pool.starmap(reduction_loop_parallel, func)
print()
print('Data reduction phase over.')
print()
return
def reduction_loop_parallel(data, galaxy_name, ap_cen_coord, semimaj, axial_ratio, pos_angle):
if os.path.exists('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits'):
print(data.bandname+'.fits already reduced, skipping to next band')
return
else: print('Processing band', data.bandname)
# Galaxy Aperture Stuff, from Dustpedia (to mask and bkg evaluation purposes)
centre_x, centre_y = ap_cen_coord.to_pixel(data.wcs)
pixel_scale = (data.get_pixel_scale()*u.deg).to('arcsec').value
Gal_Ap_Stuff = centre_x, centre_y, semimaj/pixel_scale, axial_ratio, pos_angle
# Reduce band
signal_reduced = reduce(data, Gal_Ap_Stuff)
# Save fits
hdu = fits.PrimaryHDU(signal_reduced)
hdu.header = data.hdr
hdu.writeto('../'+galaxy_name+'/_ReducedMaps/'+data.bandname+'.fits')
return
def reduce(data, Gal_Ap_Stuff, psf_degrad = True, sky_sub = True):
#if data.bandname[:7] == 'Spitzer':
# print
# print(space+"Spitzer bands usually have a problem with sky subtraction")
# print(space+"Evaluated background average is "+str(bkg_average)+". Perhaps it's too low.")
# print(space+"Do you want to insert the bkg average by hand? (insert value or n)")
# answer = raw_input()
# if answer == 'n': pass
# else: bkg_average = float(answer)
#else: pass
ok_nan = np.where(np.nan_to_num(data.signal_with_nans-1) == 0) # I know, can't do anything 'bout it
if sky_sub:
# Sky subtraction
print(space+'Sky subtraction for '+data.bandname+' ...')
# 1) Flatten the background
signal_flat, check_sub = sky_flattening(data, Gal_Ap_Stuff)
# 2) If check_sub is sub, the sky has already been flattened + removed
# if not, remove the average background
if check_sub == 'sub':
signal_skysub = signal_flat.copy()
elif check_sub == 'unsub':
bkg_average = evaluate_bkg_avg(signal_flat, Gal_Ap_Stuff)
if bkg_average < 0:
print(space+"Evaluated background average is lower than 0. Returning original map.")
signal_skysub = signal_flat.copy()
else:
print(space+"Evaluated background average is {0:.2E}".format(bkg_average))
signal_skysub = signal_flat - bkg_average
else:
print(space+'No sky flattening + subtraction requested. Hey, whatever you want.')
signal_skysub = data.signal.copy()
if psf_degrad:
print(space+'PSF degradation for '+data.bandname+' ...')
if data.bandname == 'SPIRE_350':
return signal_skysub
else:
try:
kernel_path = '../_kernels/Kernel_LoRes_'+data.bandname+'_to_SPIRE_350.fits'
kernel = fits.getdata(kernel_path)
kernel_resized = resize(kernel, (101, 101), preserve_range = True)
signal_conv = convolve(signal_skysub, kernel = kernel_resized, boundary = None, preserve_nan = True)
signal_conv[ok_nan] = np.nan
except:
print(space+'No LowResolution kernel, switching to (slower) HighResolution.')
kernel_path = '../_kernels/Kernel_HiRes_'+data.bandname+'_to_SPIRE_350.fits'
kernel = fits.getdata(kernel_path)
kernel_resized = resize(kernel, (101, 101), preserve_range = True)
signal_conv = convolve(signal_skysub, kernel = kernel_resized, boundary = None, preserve_nan = True)
signal_conv[ok_nan] = np.nan
return signal_conv
else:
print(space+'No PSF degradation requested. I beg you to reconsider.')
signal_skysub[ok_nan] = np.nan
return signal_skysub
def sky_flattening(data, Gal_Ap_Stuff):
from astropy.modeling.polynomial import Polynomial2D
from astropy.modeling.fitting import LevMarLSQFitter
from scipy.ndimage.interpolation import zoom
# 1) Read data, get pixel scale
image = data.signal_with_nans
pix_size = (data.get_pixel_scale()*u.deg).to('arcsec').value
bandname = data.bandname
# 2) If image has pixels smaller than some limit, downsample image to improve processing time
pix_size_limit = 2.0
if pix_size<pix_size_limit: downsample_factor = int(np.ceil(pix_size_limit/pix_size))
else: downsample_factor = 1
image_ds = GUS.Downsample(image, downsample_factor)
# 3) Sigma clip the downsampled image
clip_value = GUS.SigmaClip(image_ds, tolerance=0.01, sigma_thresh=3.0, median=True)
noise_value = clip_value[0]
field_value = clip_value[1]
cutoff_sigma = 2.0
cutoff = field_value + ( cutoff_sigma * noise_value )
# 4) Mask the image removing galaxy emission...
image_masked = image_ds.copy()
centre_i, centre_j, mask_semimaj_pix, mask_axial_ratio, mask_angle = Gal_Ap_Stuff
ellipse_mask = EllipseMask(image_ds, mask_semimaj_pix/downsample_factor, mask_axial_ratio, mask_angle, centre_i/downsample_factor, centre_j/downsample_factor)
image_masked[ np.where( ellipse_mask==1 ) ] = np.nan
# ...and image pixels identified as having high SNR
image_masked[ np.where( image_masked>cutoff ) ] = np.nan
# 5) Use astropy to set up 2-dimensional polynomial to the image
image_masked[ np.where( np.isnan(image_masked)==True ) ] = field_value
poly_model = Polynomial2D(degree=5)
i_coords, j_coords = np.mgrid[:image_masked.shape[0], :image_masked.shape[1]]
fitter = LevMarLSQFitter()
i_coords = i_coords.flatten()
j_coords = j_coords.flatten()
image_flattened = image_masked.flatten()
good = np.where(np.isnan(image_flattened)==False)
i_coords = i_coords[good]
j_coords = j_coords[good]
# 6) Attempt polynomial fit; if insufficient data then skip onwards
image_flattened = image_flattened[good]
try:
fit = fitter(poly_model, i_coords, j_coords, image_flattened)
except Exception:
print(space+'Error fitting polynomial sky model. Returning unaltered image.')
return image, 'unsub'
# 7) Create final polynomial filter (undoing downsampling using lorenzoriano GitHub script)
i_coords, j_coords = np.mgrid[:image_ds.shape[0], :image_ds.shape[1]]
poly_fit = fit(i_coords, j_coords)
poly_full = zoom(poly_fit, [ float(image.shape[0])/float(poly_fit.shape[0]), \
float(image.shape[1])/float(poly_fit.shape[1]) ], mode='nearest')
# 8) Establish background variation before application of filter
sigma_thresh = 3.0
clip_in = GUS.SigmaClip(image, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_in = image[ np.where( image<clip_in[1] ) ]
spread_in = np.mean( np.abs( bg_in - clip_in[1] ) )
# 9) How much reduction in background variation there was due to application of the filter
image_sub = image - poly_full
clip_sub = GUS.SigmaClip(image_sub, tolerance=0.005, median=True, sigma_thresh=sigma_thresh)
bg_sub = image_sub[ np.where( image_sub < clip_sub[1] ) ]
spread_sub = np.mean( np.abs( bg_sub - clip_sub[1] ) )
spread_diff = spread_in / spread_sub
# If the filter made significant difference, apply to image and return it; otherwise, just return the unaltered map
if spread_diff>1.1:
print(space+bandname+' background is significantly variable; removing polynomial background fit.')
return image_sub, 'sub'
else:
print(space+bandname+' background is not significantly variable; leaving image unaltered.')
return image, 'unsub'
def evaluate_bkg_avg(image, Gal_Ap_Stuff):
'''
Function to evaluate the mean background in an elliptical annulus between 1.25 and 1.601 times the galaxy semimajor axis (from DustPedia photometric table).
Args: image array, galaxy aperture tuple (centre x & y in pix, semi-major axis in pix, axial ratio, position angle in deg)
Returns: the sigma-clipped average background level per pixel.
'''
centre_x, centre_y, semimaj_pix, axial_ratio, pos_angle = Gal_Ap_Stuff
# =========
# Evaluate pixels in background annulus
bg_inner_semimaj_pix = semimaj_pix * 1.25
bg_width = (semimaj_pix * 1.601) - bg_inner_semimaj_pix
bg_calc = AnnulusSum(image, bg_inner_semimaj_pix, bg_width, axial_ratio, pos_angle, centre_x, centre_y)
bg_clip = GUS.SigmaClip(bg_calc[2], median=False, sigma_thresh=3.0)
# =========
return bg_clip[1]
def check_Dustpedia(galaxy_name, working_bands):
'''
Function to check if DustPedia photometric flux and the one measured in the same apertures with our data reduction are compatible.
Args: Galaxy name, list of working bands. Galactic extinction correction is applied to the measured fluxes.
Returns: Nothing, generates a plot in Reduction folder.
'''
import os, subprocess
from astropy.io import fits, ascii
from astropy import units as u
import pandas as pd
import numpy as np
from photutils import SkyEllipticalAperture, SkyEllipticalAnnulus, aperture_photometry
from astropy.coordinates import SkyCoord
from matplotlib import pyplot as plt
subprocess.call('mkdir ../'+galaxy_name+'/Reduction/', shell = True)
path_galaxy_photometry = '../'+galaxy_name+'/Reduction/'+galaxy_name+'_photometry.dat'
# =========
# Read DustPedia Photometric Table
DustPedia_Photom = pd.read_csv('../DustPedia_Tables/DustPedia_Aperture_Photometry_2.2.csv')
# Rearrange in order of increasing effective wavelength
right_order = [u'name', u'ra', u'dec', u'semimaj_arcsec', u'axial_ratio', u'pos_angle', u'global_flag',
u'GALEX_FUV', u'GALEX_FUV_err', u'GALEX_FUV_flag', u'GALEX_NUV', u'GALEX_NUV_err', u'GALEX_NUV_flag',
u'SDSS_u', u'SDSS_u_err', u'SDSS_u_flag', u'SDSS_g', u'SDSS_g_err', u'SDSS_g_flag',
u'SDSS_r', u'SDSS_r_err', u'SDSS_r_flag', u'SDSS_i', u'SDSS_i_err', u'SDSS_i_flag',
u'SDSS_z', u'SDSS_z_err', u'SDSS_z_flag',
u'2MASS_J', u'2MASS_J_err', u'2MASS_J_flag', u'2MASS_H', u'2MASS_H_err', u'2MASS_H_flag',
u'2MASS_Ks', u'2MASS_Ks_err', u'2MASS_Ks_flag',
u'WISE_3.4', u'WISE_3.4_err', u'WISE_3.4_flag', u'Spitzer_3.6', u'Spitzer_3.6_err', u'Spitzer_3.6_flag',
u'Spitzer_4.5', u'Spitzer_4.5_err', u'Spitzer_4.5_flag', u'WISE_4.6', u'WISE_4.6_err', u'WISE_4.6_flag',
u'Spitzer_5.8', u'Spitzer_5.8_err', u'Spitzer_5.8_flag', u'Spitzer_8.0', u'Spitzer_8.0_err', u'Spitzer_8.0_flag',
u'WISE_12', u'WISE_12_err', u'WISE_12_flag', u'WISE_22', u'WISE_22_err', u'WISE_22_flag',
u'Spitzer_24', u'Spitzer_24_err', u'Spitzer_24_flag', u'Spitzer_70', u'Spitzer_70_err', u'Spitzer_70_flag',
u'PACS_70', u'PACS_70_err', u'PACS_70_flag', u'PACS_100', u'PACS_100_err', u'PACS_100_flag',
u'PACS_160', u'PACS_160_err', u'PACS_160_flag', u'Spitzer_160', u'Spitzer_160_err', u'Spitzer_160_flag',
u'SPIRE_250', u'SPIRE_250_err', u'SPIRE_250_flag', u'SPIRE_350', u'SPIRE_350_err', u'SPIRE_350_flag',
u'SPIRE_500', u'SPIRE_500_err', u'SPIRE_500_flag']
DustPedia_Photom = DustPedia_Photom[right_order]
gal_phot = DustPedia_Photom.loc[DustPedia_Photom['name'] == galaxy_name]
# First, remove _flag columns
to_remove = gal_phot.columns.str.contains('flag', case=False)
gal_phot = gal_phot.loc[:,~to_remove]
# Extract ra, dec, semimaj, axial ratio and pos_angle, then remove them
ra, dec = gal_phot['ra'].values[0], gal_phot['dec'].values[0]
semimaj, axial_ratio, pos_angle = gal_phot['semimaj_arcsec'].values[0], gal_phot['axial_ratio'].values[0], gal_phot['pos_angle'].values[0]
to_remove = ['name', 'ra', 'dec', 'semimaj_arcsec', 'axial_ratio', 'pos_angle']
gal_phot = gal_phot.drop(columns=to_remove)
# And remove empty columns
#gal_phot = gal_phot.dropna(axis='columns')
# Extract working bands fluxes and errors
gal_phot_flux = gal_phot[working_bands]
gal_phot_flux = gal_phot_flux.transpose()
working_bands_err = [t+'_err' for t in working_bands]
gal_phot_err = gal_phot[working_bands_err]
gal_phot_err = gal_phot_err.transpose()
galaxy_photometry = pd.DataFrame(np.concatenate((gal_phot_flux.values, gal_phot_err.values), axis=1))
galaxy_photometry.columns = ['Flux', 'Error']
galaxy_photometry.index = working_bands
galaxy_photometry = galaxy_photometry.fillna(0) # Fill NaN entries with zeroes
# Save
galaxy_photometry.index.names = ['Band'] # Rename the index column as "Band"
galaxy_photometry.to_csv(path_galaxy_photometry, sep='\t', index = False)
# =========
# =========
# APERTURES
# Read the apertures + radii
positions = SkyCoord(ra*u.deg, dec*u.deg, frame='icrs')
DustPedia_aperture = SkyEllipticalAperture(positions, a=semimaj*u.arcsec, b=semimaj*u.arcsec/axial_ratio, theta=pos_angle*u.deg)
DustPedia_annulus = SkyEllipticalAnnulus(positions, a_in=semimaj*u.arcsec*1.25, a_out=semimaj*u.arcsec*1.601, \
b_out=semimaj*u.arcsec/axial_ratio, theta=pos_angle*u.deg)
# =========
# =========
# Galactic Extinction Correction dictionary
GalCorr_path = '../'+galaxy_name+'/galactic_extinction_correction.txt'
if os.path.exists(GalCorr_path): pass
else: GalExtCorr(galaxy_name, working_bands, ra, dec)
GalCorrection_dictionary = dict(zip(ascii.read(GalCorr_path)['Band'].data, \
ascii.read(GalCorr_path)['Correction'].data))
# =========
# =========
# Read reduced data and perform photometry
path_fits = '../'+galaxy_name+'/_ReducedMaps/'
list_data = []
for file in os.listdir(path_fits):
if not file.endswith('.fits'): continue
elif file.startswith('In'): continue
list_data.append(GUS.FitsUtils(path_fits+file))
list_fluxes = []
for data in list_data:
# Perform photometry
phot_table = aperture_photometry(data.signal, DustPedia_aperture, wcs = data.wcs)
phot_table['aperture_sum'].info.format = '%.4g'
# Put results in a single file
phot = GUS.round_arr(phot_table['aperture_sum'].data, 2)
# Galactic extinction correction
phot *= GalCorrection_dictionary[data.bandname]
list_fluxes.append(abs(phot))
fluxes = np.array(list_fluxes)
# Sort w.r.t wavelengths
list_wvl = (t.get_wavelength() for t in list_data)
list_band = (t.bandname for t in list_data)
wvl, fluxes, bandnames = (t for t in zip(*sorted(zip(list_wvl, fluxes, list_band))))
wvl, fluxes = np.array(wvl), np.array(fluxes)[:,0]
# Save the results
ascii.write([bandnames, GUS.round_arr(wvl,2), GUS.round_arr(fluxes, 2)], '../'+galaxy_name+'/Reduction/'+galaxy_name+'_fluxes.txt', \
names = ['Band', 'Wvl', 'Fluxes'], overwrite=True)
# =========
# =========
# Re-read Dustpedia Photometry
data_CAAPR = ascii.read(path_galaxy_photometry)
fluxes_CAAPR, errors_CAAPR = data_CAAPR['Flux'].data, data_CAAPR['Error'].data
compatibility = np.abs(np.array(fluxes_CAAPR) - np.array(fluxes))/np.sqrt(np.array(errors_CAAPR)**2)
ascii.write([GUS.round_arr(compatibility,2)], '../'+galaxy_name+'/Reduction/'+galaxy_name+'_comp.txt', format='fixed_width_two_line', \
names = ['Comp'], overwrite=True)
# =========
# =========
# Plot
xmin, xmax = np.array(wvl).min(), np.array(wvl).max()
DustpediaCheckPlot = plt.figure(figsize=(15,5))
plt.subplot(2,1,1)
plt.plot(np.array(wvl), np.array(fluxes_CAAPR), \
linestyle = 'None', marker = '.', color = 'navy', label = 'CAAPR+Literature Photometry')
plt.plot(wvl, fluxes, linestyle = 'None', marker = '.', color = 'red', label = 'My Photometry')
plt.xscale('log'), plt.yscale('log')
plt.ylabel(r'Flux (Jy)')
plt.legend()
plt.subplot(2,1,2)
plt.axhline(5, color = 'r', linestyle = '-')
plt.plot(wvl, compatibility, ms = 10.0, linestyle = 'None', color = 'k', marker = '.')
for i in range(len(wvl)):
plt.text(wvl[i], 0.5, bandnames[i], rotation = 90)
plt.xscale('log'), plt.yscale('log')
plt.xlabel(r'Wavelength ($\mu$m)'), plt.ylabel(r'Compatibility $\lambda$')
plt.subplots_adjust(hspace=0.,wspace=0.)
DustpediaCheckPlot.savefig('../'+galaxy_name+'/Reduction/'+galaxy_name+'_SED.pdf', bbox_inches = 'tight')
# =========
return
def GalExtCorr(galaxy_name, list_band, ra, dec):
list_correction = []
for band in list_band:
try:
if band == 'Spitzer_3.6': band = 'IRAC1'
elif band == 'Spitzer_4.5': band = 'IRAC2'
elif band == 'Spitzer_5.8': band = 'IRAC3'
elif band == 'Spitzer_8.0': band = 'IRAC4'
elif band == 'WISE_3.4': band = 'WISE1'
elif band == 'WISE_4.6': band = 'WISE2'
correction = ChrisFuncs.ExtCorrrct(ra, dec, band, verbose = False)
list_correction.append(correction)
except: list_correction.append(1)
ascii.write([list_band, list_correction], \
'../'+galaxy_name+'/galactic_extinction_correction.txt', names = ['Band', 'Correction'])
return
##################################
## HERE I SHAMELESSLY COPY FROM CLARK ##
##################################
def AnnulusSum(array, rad_inner, width, axial_ratio, angle, i_centre, j_centre):
'''
Function to sum all elements in an annulus centred upon the middle of the given array
Args: Array, semi-major axis of inside edge of annulus (pix), width of annulus (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
Returns: Numpy array containing the sum of the pixel values in the annulus, the total number of pixels counted, and an array containing the pixel values
'''
# Create slice of input array, containing only the region of interest
i_cutout_min = int(np.floor(max([0, i_centre-(rad_inner+width)])))
i_cutout_max = int(np.ceil(min([(array.shape)[0], i_centre+(rad_inner+width)])))
j_cutout_min = int(np.floor(max([0, j_centre-(rad_inner+width)])))
j_cutout_max = int(np.ceil(min([(array.shape)[1], j_centre+(rad_inner+width)])))
array_slice = array[ int(round(i_cutout_min)):int(round(i_cutout_max))+1, int(round(j_cutout_min)):int(round(j_cutout_max))+1 ]
i_centre_slice = i_centre - i_cutout_min
j_centre_slice = j_centre - j_cutout_min
if array[int(i_centre),int(j_centre)]!=array_slice[int(i_centre_slice),int(j_centre_slice)]:
if np.isnan(array[int(i_centre),int(j_centre)])==False and np.isnan(array_slice[int(i_centre_slice),int(j_centre_slice)])==False:
print('SEVERE ERROR: AnnulusSum check failed.')
pdb.set_trace()
else:
array = array_slice
i_centre = i_centre_slice
j_centre = j_centre_slice
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj_inner = float(rad_inner)
semi_min_inner = float(semi_maj_inner) / float(axial_ratio)
semi_maj_outer = float(rad_inner) + float(width)
semi_min_outer = float(semi_maj_outer) / float(axial_ratio)
angle = np.radians(float(angle))
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within inner ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_inner = (j_trans**2 / semi_maj_inner**2) + (i_trans**2 / semi_min_inner**2 )
# Use meshgrids to create array identifying which coordinates lie within outer ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check_outer = (j_trans**2 / semi_maj_outer**2) + (i_trans**2 / semi_min_outer**2 )
# Calculate flux & pixels in aperture, and store pixel values
annulus_where = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==False) )
annulus_tot = sum( array[ annulus_where ] )
annulus_count = annulus_where[0].shape[0]
annulus_pix = array[ annulus_where ]
annulus_nan = np.where( (ellipse_check_outer<=1) & (ellipse_check_inner>1) & (np.isnan(array)==True) )
# Return results
return [annulus_tot, annulus_count, annulus_pix, annulus_nan]
def EllipseMask(array, rad, axial_ratio, angle, i_centre, j_centre):
'''
Function to return a mask identifying all pixels within an ellipse of given parameters
Args: Array, semi-major axis (pix), axial ratio, position angle (deg), i & j coords of centre of ellipse
Returns: Mask array of same dimensions as input array where pixels that lie within ellipse have value 1
'''
# Define semi-major & semi-minor axes, then convert input angle to radians
semi_maj = float(rad)
semi_min = float(rad) / float(axial_ratio)
# Convert the angle to a float in radians; handle astropy Quantities as well as plain numbers
try:
if angle.unit == 'rad': angle = float(angle.value)
else: angle = np.radians(float(angle.value))
except AttributeError: angle = np.radians(float(angle)) # plain number: assume degrees and convert
# Create meshgrids with which to access i & j coordinates for ellipse calculations
i_linespace = np.linspace(0, array.shape[0]-1, array.shape[0])
j_linespace = np.linspace(0, array.shape[1]-1, array.shape[1])
i_grid, j_grid = np.meshgrid(i_linespace, j_linespace, indexing='ij')
# Use meshgrids to create array identifying which coordinates lie within ellipse
i_trans = -(j_grid-float(j_centre))*np.sin(angle) + (i_grid-float(i_centre))*np.cos(angle)
j_trans = (j_grid-float(j_centre))*np.cos(angle) + (i_grid-float(i_centre))*np.sin(angle)
ellipse_check = (j_trans**2 / semi_maj**2) + (i_trans**2 / semi_min**2 )
# Create ellipse mask
ellipse_mask = np.zeros([array.shape[0], array.shape[1]])
ellipse_mask[ np.where( ellipse_check<=1 ) ] = 1.0
# Return array
return ellipse_mask
def CircleSum(fits, i_centre, j_centre, r):
'''
Function to sum all pixel elements inside a given circle... the old-fashioned way
Args: Array to be used, i & j coordinates of centre of circle, radius of circle
Returns: Sum of elements within circle, number of pixels within circle
'''
i_centre, j_centre, r = int(i_centre), int(j_centre), int(r)
ap_sum = 0.0
ap_pix = 0.0
ap_values = []
for i in range(-r, r+1):
for j in range(-r, r+1):
if i**2.0 + j**2.0 <= r**2.0:
try:
ap_sum += fits[i_centre+i, j_centre+j]
ap_pix += 1.0
ap_values.append(fits[i_centre+i, j_centre+j])
except:
continue
return [ap_sum, ap_pix, ap_values]
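# ------------------------------------------------------------------
# Hedged self-test (not in the original script): exercise EllipseMask and
# AnnulusSum on a small synthetic image to show the expected return values.
# Run this module directly (with its dependencies installed) to see the output.
if __name__ == '__main__':
    demo_image = np.ones((50, 50))
    demo_image[10, 10] = np.nan  # one NaN pixel, to show that NaNs are skipped
    demo_mask = EllipseMask(demo_image, rad=15, axial_ratio=2.0, angle=np.float64(30.0),
                            i_centre=25, j_centre=25)
    print('pixels inside the ellipse:', int(demo_mask.sum()))
    ann_tot, ann_count, ann_pix, ann_nan = AnnulusSum(
        demo_image, rad_inner=15, width=5, axial_ratio=2.0, angle=30.0,
        i_centre=25, j_centre=25)
    print('annulus sum: %.1f over %d pixels' % (ann_tot, ann_count))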
|
nilq/baby-python
|
python
|
def model_to_dicts(Schema, model):
# If the result comes from a paginator, pass model.items instead of model
common_schema = Schema(many=True) # build the serializer from the custom class that inherits ma.ModelSchema
output = common_schema.dump(model) # produce a serializable object
return output
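# Hedged usage sketch (the model and schema names below are illustrative,
# not from the original project):
#
#   class UserSchema(ma.ModelSchema):
#       class Meta:
#           model = User
#
#   data = model_to_dicts(UserSchema, User.query.all())     # plain query result
#   page = User.query.paginate(page=1, per_page=20)
#   data = model_to_dicts(UserSchema, page.items)           # paginator: pass .items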
|
nilq/baby-python
|
python
|
"""OsservaPrezzi class for aio_osservaprezzi."""
from .const import ENDPOINT, REGIONS
from .models import Station
from .exceptions import (
RegionNotFoundException,
StationsNotFoundException,
OsservaPrezziConnectionError,
OsservaPrezziException,
)
from typing import Any
import asyncio
import aiohttp
import async_timeout
class OsservaPrezzi:
def __init__(
self,
parameters,
session: aiohttp.ClientSession = None,
request_timeout: int = 8,
) -> "OsservaPrezzi":
"""Initialize connection with OsservaPrezzi API."""
self._session = session
self._close_session = False
self.request_timeout = request_timeout
try:
self._parameters = f"region={REGIONS[parameters['region']]}\
&province={parameters['province']}\
&town={parameters['town']}\
&carb="
except KeyError as exception:
raise RegionNotFoundException(
"Error occurred while trying to find the region."
) from exception
async def _request(self) -> Any:
"""Handle a request to OsservaPrezzi API."""
method = "POST"
headers = {"Content-Type": "application/x-www-form-urlencoded"}
if self._session is None:
self._session = aiohttp.ClientSession()
self._close_session = True
try:
async with async_timeout.timeout(self.request_timeout):
response = await self._session.request(
method, ENDPOINT, data=self._parameters, headers=headers,
)
response.raise_for_status()
except asyncio.TimeoutError as exception:
raise OsservaPrezziConnectionError(
"Timeout occurred while connecting to OsservaPrezzi."
) from exception
except (aiohttp.ClientError, aiohttp.ClientResponseError) as exception:
raise OsservaPrezziConnectionError(
"Error occurred while connecting to OsservaPrezzi."
) from exception
if "application/json" not in response.headers.get("Content-Type", ""):
raise OsservaPrezziException("Unexpected response from OsservaPrezzi.")
return (await response.json())["array"]
async def get_stations(self):
data = await self._request()
return [Station.from_dict(s) for s in data]
async def get_station_by_id(self, id):
stations = await self.get_stations()
try:
return next(filter(lambda d: d.id == id, stations))
except Exception:
raise StationsNotFoundException("Couldn't find specified station.")
async def close(self) -> None:
"""Close the session."""
if self._close_session and self._session:
await self._session.close()
async def __aenter__(self) -> "OsservaPrezzi":
"""Async enter."""
return self
async def __aexit__(self, *exc_info) -> None:
"""Async exit."""
await self.close()
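# ------------------------------------------------------------------
# Hedged usage sketch (not part of the library): fetch stations for an
# illustrative location. The region/province/town values below are made up;
# valid values depend on the REGIONS mapping and the OsservaPrezzi service.
async def _demo() -> None:
    params = {"region": "Lombardia", "province": "MI", "town": "Milano"}
    async with OsservaPrezzi(params) as client:
        for station in (await client.get_stations())[:5]:
            print(station)

if __name__ == "__main__":
    asyncio.run(_demo())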
|
nilq/baby-python
|
python
|
import telebot
import os
TOKEN = os.getenv('TELE_TOKEN')
bot = telebot.TeleBot(TOKEN)
@bot.message_handler(commands=['start'])
def start_message(message):
markup = telebot.types.ReplyKeyboardMarkup()
# start_btn = telebot.types.KeyboardButton("/start")
help_btn = telebot.types.KeyboardButton("/help")
markup.add(help_btn)
bot.send_message(message.chat.id, 'Hi! This is a super cool bot!', reply_markup=markup)
@bot.message_handler(commands=['help'])
def help_message(message):
bot.send_message(message.chat.id, 'Enter a date in the format YYYY-MM-DD')
@bot.message_handler(content_types=['text'])
def get_text_messages(message):
msg = message.text
bot.send_message(message.from_user.id, msg)
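# Hedged sketch (not in the original bot): the /help text asks for a date in
# YYYY-MM-DD form, so the text handler above would typically validate it, e.g.:
#
#   from datetime import datetime
#   try:
#       day = datetime.strptime(message.text, '%Y-%m-%d').date()
#       bot.send_message(message.chat.id, 'Accepted date: {}'.format(day))
#   except ValueError:
#       bot.send_message(message.chat.id, 'Invalid format, expected YYYY-MM-DD')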
bot.polling(none_stop=True, interval=0)
|
nilq/baby-python
|
python
|
# [h] paint and arrange groups
'''Paint each group of glyphs in the font with a different color.'''
# debug
import hTools2
reload(hTools2)
if hTools2.DEBUG:
import hTools2.modules.color
reload(hTools2.modules.color)
# import
from hTools2.modules.color import paint_groups
# run
f = CurrentFont()
paint_groups(f)
|
nilq/baby-python
|
python
|
"""
3->7->5->12->None
"""
class SinglyListNode(object):
def __init__(self, value):
self.value = value
self.next = None
a = SinglyListNode(3)
b = SinglyListNode(7)
c = SinglyListNode(5)
d = SinglyListNode(12)
a.next = b
b.next = c
c.next = d
print(a.next)
print(b)
print(b.next)
print(c)
def iterate(head):
# walk the list node by node and stop once the current node becomes None
current = head
while current != None:
print(current.value)
current = current.next
iterate(a)
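# ------------------------------------------------------------------
# Hedged extension (not in the original snippet): the same traversal pattern
# works for other operations, e.g. checking whether a value is in the list.
def contains(head, target):
    # walk the list node by node; return True as soon as the value is found
    current = head
    while current is not None:
        if current.value == target:
            return True
        current = current.next
    return False

print(contains(a, 5))    # True: 5 is in 3->7->5->12
print(contains(a, 99))   # False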
|
nilq/baby-python
|
python
|
from __future__ import absolute_import, unicode_literals
import logging
from django.core.management.base import BaseCommand
from housing_counselor.geocoder import BulkZipCodeGeocoder, GeocodedZipCodeCsv
logger = logging.getLogger(__name__)
class Command(BaseCommand):
help = 'Geocode all possible zipcodes'
def add_arguments(self, parser):
parser.add_argument('output_filename', help='output CSV filename')
parser.add_argument('-c', '--continue-file', action='store_true',
help='continue partially complete output file')
def handle(self, *args, **options):
output_filename = options['output_filename']
logger.info('geocoding zipcodes to %s', output_filename)
if options['continue_file']:
mode = 'a'
zipcodes = GeocodedZipCodeCsv.read(output_filename)
start = int(max(zipcodes.keys())) + 1
else:
mode = 'w'
start = 0
logger.info('starting geocoding at %s', start)
zipcodes = BulkZipCodeGeocoder().geocode_zipcodes(start=start)
with open(output_filename, mode) as f:
GeocodedZipCodeCsv.write(f, zipcodes)
|
nilq/baby-python
|
python
|
"""
Exercise 6
Write a program that asks for a date in the format 04/12/1973 and returns it
in the format 1973/12/04
"""
from datetime import datetime
fecha = input("Enter a date (format dd/mm/yyyy): ")
fecha1 = datetime.strptime(fecha, "%d/%m/%Y")
fecha3 = datetime.strftime(fecha1, "%Y/%m/%d")
#print(fecha1)
print(fecha3)
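# Hedged alternative (not part of the original exercise): the same reordering
# can be done with plain string splitting, assuming the input really is dd/mm/yyyy.
d, m, y = fecha.split("/")
print("/".join([y, m, d]))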
|
nilq/baby-python
|
python
|
from .registries import Registry, meta_registry, QuerySet, Manager, MultipleObjectsReturned, DoesNotExist
__version__ = "0.2.1"
|
nilq/baby-python
|
python
|
# Copyright 2019 The MACE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import sys
from utils import device
from utils.util import MaceLogger
from utils.util import mace_check
def get_apu_version(enable_apu, android_ver, target_soc):
if enable_apu:
android_ver = int(android_ver)
if android_ver <= 10: # android Q
target_soc = target_soc.lower()
if target_soc.startswith("mt67"):
return 1
else:
return 2
elif android_ver == 11: # android R
target_soc = target_soc.lower()
if target_soc.startswith("mt689") or target_soc == "mt6877":
return 4
else:
return 3
else: # android S
return 4
return -1
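# Hedged illustration (not in the original file): expected results of
# get_apu_version for a few (android_ver, target_soc) combinations, following
# the branches above. The SoC names are examples only.
#   get_apu_version(True, "10", "mt6785") -> 1   # Android Q, mt67xx
#   get_apu_version(True, "10", "mt6885") -> 2   # Android Q, other SoCs
#   get_apu_version(True, "11", "mt6853") -> 3   # Android R, except mt689x / mt6877
#   get_apu_version(True, "11", "mt6893") -> 4   # Android R, mt689x / mt6877
#   get_apu_version(True, "12", "mt6895") -> 4   # Android S and later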
def get_apu_so_paths_by_props(android_ver, target_soc):
so_path_array = []
apu_version = get_apu_version(True, android_ver, target_soc)
so_path = "third_party/apu/"
if apu_version == 1 or apu_version == 2:
if apu_version == 1:
so_path += "android_Q/mt67xx/"
else:
so_path += "android_Q/mt68xx/"
frontend_so_path = so_path + "%s/libapu-frontend.so" % target_soc
if not os.path.exists(frontend_so_path):
frontend_so_path = so_path + "libapu-frontend.so"
so_path_array.append(frontend_so_path)
so_path_array.append(so_path + "%s/libapu-platform.so" % target_soc)
elif apu_version == 3:
so_path += "android_R/"
# For android R except mt689x&mt6877
so_path_array.append(so_path + "libapu-apuwareapusys.mtk.so")
so_path_array.append(so_path + "libapu-apuwareutils.mtk.so")
so_path_array.append(so_path + "libapu-apuwarexrp.mtk.so")
so_path_array.append(so_path + "libapu-frontend.so")
so_path_array.append(so_path + "libapu-platform.so")
else: # For android S and mt689x&mt6877 on android R
mace_check(apu_version == 4, "Invalid apu version")
return so_path_array
def get_apu_so_paths(android_device):
target_props = android_device.info()
target_soc = target_props["ro.board.platform"]
android_ver = int(target_props["ro.build.version.release"])
return get_apu_so_paths_by_props(android_ver, target_soc)
def parse_args():
base_parser = argparse.ArgumentParser(add_help=False)
base_parser.add_argument(
"--target_abi",
type=str,
default="arm64-v8a",
help="Target ABI: only support arm64-v8a"
)
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers()
version_parser = subparsers.add_parser(
'get-version',
parents=[base_parser],
help='get apu version')
version_parser.set_defaults(func=get_version)
copy_so_parser = subparsers.add_parser(
'copy-so-files',
parents=[base_parser],
help='copy apu files to apu_path')
copy_so_parser.add_argument(
"--apu_path",
type=str,
default="",
help="path for storing apu so files on device"
)
copy_so_parser.set_defaults(func=copy_so_files)
return parser.parse_known_args()
def get_cur_device_id(flags):
run_devices = device.choose_devices(flags.target_abi, "all")
run_device = None
device_num = len(run_devices)
if device_num == 0: # for CI
MaceLogger.warning("No Android devices are plugged in, "
"you need to copy `apu` so files by yourself.")
elif device_num > 1: # for CI
MaceLogger.warning("More than one Android devices are plugged in, "
"you need to copy `apu` so files by yourself.")
else:
run_device = run_devices[0]
return run_device
def get_version(flags):
device_id = get_cur_device_id(flags)
if device_id is not None:
android_device = device.create_device(flags.target_abi, device_id)
target_props = android_device.info()
target_soc = target_props["ro.board.platform"]
android_ver = int(target_props["ro.build.version.release"])
apu_version = get_apu_version(True, android_ver, target_soc)
else:
apu_version = 4
MaceLogger.warning("Can not get unique device ID, MACE select the"
" latest apu version: %s" % apu_version)
sys.exit(apu_version)
def copy_so_files(flags):
apu_so_paths = []
device_id = get_cur_device_id(flags)
if device_id is not None:
android_device = device.create_device(flags.target_abi, device_id)
apu_so_paths = get_apu_so_paths(android_device)
for apu_so_path in apu_so_paths:
device.execute("cp -f %s %s" % (apu_so_path, flags.apu_path), True)
if __name__ == "__main__":
flags, args = parse_args()
flags.func(flags)
|
nilq/baby-python
|
python
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Tests for the tag_linux.txt tagging file."""
import unittest
from plaso.containers import events
from plaso.lib import definitions
from plaso.parsers import bash_history
from plaso.parsers import docker
from plaso.parsers import dpkg
from plaso.parsers import selinux
from plaso.parsers import syslog
from plaso.parsers import utmp
from plaso.parsers import zsh_extended_history
from plaso.parsers.syslog_plugins import cron
from tests.data import test_lib
class LinuxTaggingFileTest(test_lib.TaggingFileTestCase):
"""Tests the tag_linux.txt tagging file.
In the tests below the EventData classes are used to catch failing tagging
rules in case event data types are renamed.
"""
_TAG_FILE = 'tag_linux.txt'
def testRuleApplicationExecution(self):
"""Tests the application_execution tagging rule."""
# Test: data_type is 'bash:history:command'
attribute_values_per_name = {}
self._CheckTaggingRule(
bash_history.BashHistoryEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'docker:json:layer'
attribute_values_per_name = {}
self._CheckTaggingRule(
docker.DockerJSONLayerEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'selinux:line' AND (audit_type is 'EXECVE' OR
# audit_type is 'USER_CMD')
attribute_values_per_name = {
'audit_type': ['EXECVE', 'USER_CMD']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'shell:zsh:history'
attribute_values_per_name = {}
self._CheckTaggingRule(
zsh_extended_history.ZshHistoryEventData, attribute_values_per_name,
['application_execution'])
# Test: data_type is 'syslog:cron:task_run'
attribute_values_per_name = {}
self._CheckTaggingRule(
cron.CronTaskRunEventData, attribute_values_per_name,
['application_execution'])
# Test: reporter is 'sudo' AND body contains 'COMMAND='
attribute_values_per_name = {
'body': ['test if my COMMAND=bogus'],
'reporter': ['sudo']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['application_execution'])
# Test: reporter is 'CROND' AND body contains 'CMD'
attribute_values_per_name = {
'body': ['test if my CMD bogus'],
'reporter': ['CROND']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['application_execution'])
def testRuleLogin(self):
"""Tests the login tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 7
attribute_values_per_name = {
'type': [7]}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name,
['login'])
# Test: data_type is 'selinux:line' AND audit_type is 'LOGIN'
attribute_values_per_name = {
'audit_type': ['LOGIN']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'login' AND (body contains 'logged in' OR
# body contains 'ROOT LOGIN' OR body contains 'session opened')
attribute_values_per_name = {
'body': ['logged in', 'ROOT LOGIN', 'session opened'],
'reporter': ['login']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'sshd' AND (body contains 'session opened' OR
# body contains 'Starting session')
attribute_values_per_name = {
'body': ['session opened', 'Starting session'],
'reporter': ['sshd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'dovecot' AND body contains 'imap-login: Login:'
attribute_values_per_name = {
'body': ['imap-login: Login:'],
'reporter': ['dovecot']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
# Test: reporter is 'postfix/submission/smtpd' AND body contains 'sasl_'
attribute_values_per_name = {
'body': ['sasl_method=PLAIN, sasl_username='],
'reporter': ['postfix/submission/smtpd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login'])
def testRuleLoginFailed(self):
"""Tests the login_failed tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'ANOM_LOGIN_FAILURES'
attribute_values_per_name = {
'audit_type': ['ANOM_LOGIN_FAILURES']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['login_failed'])
# Test: data_type is 'selinux:line' AND audit_type is 'USER_LOGIN' AND
# body contains 'res=failed'
attribute_values_per_name = {
'audit_type': ['USER_LOGIN'],
'body': ['res=failed']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['login_failed'])
# Test: data_type is 'syslog:line' AND body contains 'pam_tally2'
attribute_values_per_name = {
'body': ['pam_tally2']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: (reporter is 'sshd' OR
# reporter is 'login' OR
# reporter is 'postfix/submission/smtpd' OR
# reporter is 'sudo') AND
# body contains 'uthentication fail'
attribute_values_per_name = {
'body': ['authentication failed', 'authentication failure',
'Authentication failure'],
'reporter': ['login', 'postfix/submission/smtpd', 'sshd', 'sudo']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: (reporter is 'xscreensaver' or
# reporter is 'login') AND
# body contains 'FAILED LOGIN'
attribute_values_per_name = {
'body': ['FAILED LOGIN'],
'reporter': ['login', 'xscreensaver']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: reporter is 'su' AND body contains 'DENIED'
attribute_values_per_name = {
'body': ['DENIED su from'],
'reporter': ['su']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
# Test: reporter is 'nologin'
attribute_values_per_name = {
'reporter': ['nologin']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['login_failed'])
def testRuleUserAdd(self):
"""Tests the useradd tagging rule."""
# Test: reporter is 'useradd' AND body contains 'new user'
attribute_values_per_name = {
'reporter': ['useradd'],
'body': ['new user']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['useradd'])
# Test: data_type is 'selinux:line' AND audit_type is 'ADD_USER'
attribute_values_per_name = {
'audit_type': ['ADD_USER']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['useradd'])
def testRuleGroupAdd(self):
"""Tests the groupadd tagging rule."""
# Test: reporter is 'useradd' AND body contains 'new group'
attribute_values_per_name = {
'reporter': ['useradd'],
'body': ['new group']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupadd'])
# Test: data_type is 'selinux:line' AND audit_type is 'ADD_GROUP'
attribute_values_per_name = {
'audit_type': ['ADD_GROUP']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['groupadd'])
# Test: reporter is 'groupadd'
attribute_values_per_name = {
'reporter': ['groupadd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupadd'])
def testRuleUserDel(self):
"""Tests the userdel tagging rule."""
# Test: reporter is 'userdel' AND body contains 'delete user'
attribute_values_per_name = {
'reporter': ['userdel'],
'body': ['delete user']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['userdel'])
# Test: data_type is 'selinux:line' AND audit_type is 'DEL_USER'
attribute_values_per_name = {
'audit_type': ['DEL_USER']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['userdel'])
def testRuleGroupDel(self):
"""Tests the groupdel tagging rule."""
# Test: reporter is 'userdel' AND body contains 'removed group'
attribute_values_per_name = {
'reporter': ['userdel'],
'body': ['removed group']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupdel'])
# Test: data_type is 'selinux:line' AND audit_type is 'DEL_GROUP'
attribute_values_per_name = {
'audit_type': ['DEL_GROUP']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['groupdel'])
# Test: reporter is 'groupdel'
attribute_values_per_name = {
'reporter': ['groupdel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['groupdel'])
def testRuleFirewallChange(self):
"""Tests the firewall_change tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'NETFILTER_CFG'
attribute_values_per_name = {
'audit_type': ['NETFILTER_CFG']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['firewall_change'])
def testRuleLogout(self):
"""Tests the logout tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 8 AND terminal != '' AND
# pid != 0
# Cannot use _CheckTaggingRule here because of terminal != ''
event = events.EventObject()
event.timestamp = self._TEST_TIMESTAMP
event.timestamp_desc = definitions.TIME_DESCRIPTION_UNKNOWN
event_data = utmp.UtmpEventData()
event_data.type = 0
event_data.terminal = 'tty1'
event_data.pid = 1
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 0)
self._CheckLabels(storage_writer, [])
event_data.type = 8
event_data.terminal = ''
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 0)
self._CheckLabels(storage_writer, [])
event_data.terminal = 'tty1'
event_data.pid = 0
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 0)
self._CheckLabels(storage_writer, [])
event_data.pid = 1
storage_writer = self._TagEvent(event, event_data, None)
self.assertEqual(storage_writer.number_of_event_tags, 1)
self._CheckLabels(storage_writer, ['logout'])
# Test: reporter is 'login' AND body contains 'session closed'
attribute_values_per_name = {
'body': ['session closed'],
'reporter': ['login']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: reporter is 'sshd' AND (body contains 'session closed' OR
# body contains 'Close session')
attribute_values_per_name = {
'body': ['Close session', 'session closed'],
'reporter': ['sshd']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: reporter is 'systemd-logind' AND body contains 'logged out'
attribute_values_per_name = {
'body': ['logged out'],
'reporter': ['systemd-logind']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: reporter is 'dovecot' AND body contains 'Logged out'
attribute_values_per_name = {
'body': ['Logged out'],
'reporter': ['dovecot']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['logout'])
# Test: data_type is 'selinux:line' AND audit_type is 'USER_LOGOUT'
attribute_values_per_name = {
'audit_type': ['USER_LOGOUT']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['logout'])
def testRuleSessionStart(self):
"""Tests the session_start tagging rule."""
# Test: reporter is 'systemd-logind' and body contains 'New session'
attribute_values_per_name = {
'body': ['New session'],
'reporter': ['systemd-logind']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['session_start'])
def testRuleSessionStop(self):
"""Tests the session_stop tagging rule."""
# Test: reporter is 'systemd-logind' and body contains 'Removed session'
attribute_values_per_name = {
'body': ['Removed session'],
'reporter': ['systemd-logind']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['session_stop'])
def testRuleBoot(self):
"""Tests the boot tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 2 AND
# terminal is 'system boot' AND username is 'reboot'
attribute_values_per_name = {
'terminal': ['system boot'],
'type': [2],
'username': ['reboot']}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name, ['boot'])
# Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_BOOT'
attribute_values_per_name = {
'audit_type': ['SYSTEM_BOOT']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['boot'])
def testRuleShutdown(self):
"""Tests the shutdonw tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 1 AND
# (terminal is '~~' OR terminal is 'system boot') AND
# username is 'shutdown'
attribute_values_per_name = {
'terminal': ['~~', 'system boot'],
'type': [1],
'username': ['shutdown']}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name, ['shutdown'])
# Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_SHUTDOWN'
attribute_values_per_name = {
'audit_type': ['SYSTEM_SHUTDOWN']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['shutdown'])
def testRuleRunlevel(self):
"""Tests the runlevel tagging rule."""
# Test: data_type is 'linux:utmp:event' AND type == 1 AND
# username is 'runlevel'
attribute_values_per_name = {
'type': [1],
'username': ['runlevel']}
self._CheckTaggingRule(
utmp.UtmpEventData, attribute_values_per_name, ['runlevel'])
# Test: data_type is 'selinux:line' AND audit_type is 'SYSTEM_RUNLEVEL'
attribute_values_per_name = {
'audit_type': ['SYSTEM_RUNLEVEL']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['runlevel'])
def testRuleDeviceConnection(self):
"""Tests the device_connection tagging rule."""
# Test: reporter is 'kernel' AND body contains 'New USB device found'
attribute_values_per_name = {
'body': ['New USB device found'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['device_connection'])
def testRuleDeviceDisconnection(self):
"""Tests the device_disconnection tagging rule."""
# Test: reporter is 'kernel' AND body contains 'USB disconnect'
attribute_values_per_name = {
'body': ['USB disconnect'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['device_disconnection'])
def testRuleApplicationInstall(self):
"""Tests the application_install tagging rule."""
# Test: data_type is 'dpkg:line' AND body contains 'status installed'
attribute_values_per_name = {
'body': ['status installed']}
self._CheckTaggingRule(
dpkg.DpkgEventData, attribute_values_per_name,
['application_install'])
def testRuleServiceStart(self):
"""Tests the service_start tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'SERVICE_START'
attribute_values_per_name = {
'audit_type': ['SERVICE_START']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['service_start'])
def testRuleServiceStop(self):
"""Tests the service_stop tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'SERVICE_STOP'
attribute_values_per_name = {
'audit_type': ['SERVICE_STOP']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['service_stop'])
def testRulePromiscuous(self):
"""Tests the promiscuous tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'ANOM_PROMISCUOUS'
attribute_values_per_name = {
'audit_type': ['ANOM_PROMISCUOUS']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name,
['promiscuous'])
# Test: reporter is 'kernel' AND body contains 'promiscuous mode'
attribute_values_per_name = {
'body': ['promiscuous mode'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name,
['promiscuous'])
def testRuleCrash(self):
"""Tests the crash tagging rule."""
# Test: data_type is 'selinux:line' AND audit_type is 'ANOM_ABEND'
attribute_values_per_name = {
'audit_type': ['ANOM_ABEND']}
self._CheckTaggingRule(
selinux.SELinuxLogEventData, attribute_values_per_name, ['crash'])
# Test: reporter is 'kernel' AND body contains 'segfault'
attribute_values_per_name = {
'body': ['segfault'],
'reporter': ['kernel']}
self._CheckTaggingRule(
syslog.SyslogLineEventData, attribute_values_per_name, ['crash'])
if __name__ == '__main__':
unittest.main()
|
nilq/baby-python
|
python
|
__author__ = 'surya'
# plot the S integral intensity values for each plate and save a histogram image for that plate
def plot(file,nslen,slen):
import matplotlib.pyplot as plt
start=5
list=[]
with open(file+"_IntegralIntensity.txt") as files:
next(files)
for lines in files:
splits=lines.split("\t")
for i in range(start+nslen,start+nslen+slen):
list.append(float(splits[i].strip()))
plt.hist(list,50)
plt.xlabel('intensity')
plt.ylabel('frequency')
plt.title('Histogram distribution of S integral intensity')
plt.subplots_adjust(left=0.2)
plt.savefig(file+'.png')
plt.clf()
return file+'.png'
# produces a summary plot of the number of significant interactions found per plate,
# before and after filtering out annotated entries (e.g. SD+, control+ or control-)
def plotFinalPlt(path):
import matplotlib.pyplot as pltt
x=[]
y=[]
ys=[]
with open(path+".txt") as file:
next (file)
for line in file:
splits=line.split("\t")
x.append(splits[1].strip())
y.append(splits[4].strip())
ys.append(splits[2].strip())
pltt.plot(y,"ro-",ys,"bs-")
pltt.title('Significant interaction found for each plate')
pltt.xlabel('interaction')
pltt.ylabel('interactions')
pltt.xlim(-1,len(x))
mi=int(min(y))-2
ma=int(max(ys))+10
# pltt.ylim(mi,ma)
for i in range(0,len(x)):
pltt.annotate(x[i]+", " +y[i], xy=(i,y[i]),
arrowprops=dict(facecolor='green'),
)
pltt.savefig(path+'.png')
pltt.clf()
return path+'.png'
##################################################################
## create a plot from the list of the values
def create_plot(x_list,nx_list,var):
import random
## select random numbers of the same length of NS
s_list=random.sample(x_list,len(nx_list))
num_bins=50
import matplotlib.pyplot as plt
plt.figure("Histogram distribution for "+var)
plt.subplot(211)
plt.title("Stimulating Integral Intensity")
plt.ylabel('frequency')
plt.hist(s_list, num_bins,facecolor='green')
plt.subplot(212)
plt.title("Non-Stimulating Integral Intensity")
plt.hist(nx_list, num_bins,facecolor='red')
plt.xlabel('intensity')
plt.ylabel('frequency')
# # Tweak spacing to prevent clipping of ylabel
# plt.subplots_adjust(left=0.15)
plt.show()
################################################################
### create a box plot
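# Hedged sketch: the original file ends right after this banner, so the function
# below is an assumed completion that mirrors the layout of create_plot above.
def create_box_plot(x_list, nx_list, var):
    import matplotlib.pyplot as plt
    # compare stimulating vs non-stimulating integral intensities side by side
    plt.figure("Box plot distribution for " + var)
    plt.boxplot([x_list, nx_list], labels=['Stimulating', 'Non-Stimulating'])
    plt.ylabel('intensity')
    plt.title('Integral intensity distribution')
    plt.show()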
|
nilq/baby-python
|
python
|
from django.db import models
from django.utils import timezone
from django.utils.translation import ugettext_lazy as _
from django.db.models.signals import post_save
from django.dispatch import receiver
from django.contrib.postgres import fields as pgfields
from django.contrib.auth.models import User
# about user
# https://docs.djangoproject.com/en/1.11/topics/auth/customizing/#using-a-custom-user-model-when-starting-a-project
# https://simpleisbetterthancomplex.com/tutorial/2017/02/18/how-to-create-user-sign-up-view.html
class AppSettings:
"""Settings for whole applicaion"""
LANGS = {
'ru': 'Русский',
'en': 'English'
}
SET_TYPES = {
'by_stop': _('By finish'),
'by_start': _('By start')
}
@staticmethod
def get():
return dict(
min_weight=Set.MIN_WEIGHT,
max_weight=Set.MAX_WEIGHT,
min_reps=Set.MIN_REPS,
max_reps=Set.MAX_REPS,
langs=AppSettings.LANGS,
set_types=AppSettings.SET_TYPES
)
class UserSettings:
"""User settings are stored in profile"""
# https://docs.djangoproject.com/en/2.0/ref/contrib/postgres/fields/#django.contrib.postgres.fields.JSONField
# default must be callable
@staticmethod
def default():
return dict(
lang='ru',
set_type='by_stop',
set_weight=20,
set_reps=10
)
class Profile(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE)
email_confirmed = models.BooleanField(default=False)
settings = pgfields.JSONField(default=UserSettings.default)
# todo: investigate
# http://www.django-rest-framework.org/api-guide/serializers/#handling-saving-related-instances-in-model-manager-classes
@receiver(post_save, sender=User)
def update_user_profile(sender, instance, created, **kwargs):
if created:
Profile.objects.create(user=instance)
instance.profile.save()
class TrainingName(models.Model):
"""Название тренировки, общий, дополняемый список, для всех"""
text = models.CharField(_('name'), max_length=250, unique=True)
class Training(models.Model):
"""Тренировка"""
STARTED = 'st'
FINISHED = 'fn'
STATUSES = (
(STARTED, _('Started')),
(FINISHED, _('Finished'))
)
date = models.DateTimeField(_('date'), default=timezone.now)
status = models.CharField(
max_length=2,
choices=STATUSES,
default=STARTED
)
name = models.ForeignKey(
TrainingName,
on_delete=models.PROTECT,
verbose_name=_('name')
)
user = models.ForeignKey(
User,
on_delete=models.PROTECT,
related_name='trainings',
verbose_name=_('user')
)
    def __str__(self):
        return self.name.text
class Set(models.Model):
"""Подходы (вес, повторения, время)"""
MIN_WEIGHT = 1
MAX_WEIGHT = 600
MIN_REPS = 1
MAX_REPS = 999
weight = models.PositiveIntegerField(_('weight'))
reps = models.PositiveIntegerField(_('repetitions'))
started_at = models.DateTimeField(_('started at'), null=True)
"""Start time of set, value - if set is started manually, null if set is filled by end fact"""
# todo: validate no less than started (? and training date)
stopped_at = models.DateTimeField(_('stopped at'), default=timezone.now)
"""Stop time of set"""
training = models.ForeignKey(
Training,
on_delete=models.CASCADE,
related_name='sets',
verbose_name=_('training')
)
def __str__(self):
return '{} x{}'.format(self.weight, self.reps)
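# A minimal usage sketch of these models (illustrative names; assumes migrations have been
# applied and a User instance called `user` already exists):
#
#   name, _ = TrainingName.objects.get_or_create(text='Bench press')
#   training = Training.objects.create(name=name, user=user)
#   Set.objects.create(training=training, weight=60, reps=10)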
|
nilq/baby-python
|
python
|
import requests
from bs4 import BeautifulSoup
#define a function that gets the text of an HTML page
def gettext(url, kv=None):
try:
r = requests.get(url,headers = kv)
r.raise_for_status()
r.encoding = r.apparent_encoding
return r.text
except:
print("Failure")
#define a function that downloads and saves a photo
def scrapy_photo(url,file_name):
try:
r = requests.get(url)
r.encoding = r.apparent_encoding
print(r.status_code)
r.raise_for_status()
with open(file_name,'wb') as f:
f.write(r.content)
except:
print("error")
#get all image links in an HTML page
def get_img_url(w_url):
html = gettext(w_url,kv = {'user-agent':'Mozilla/5.0'})
soup = BeautifulSoup(html, 'lxml')
a = soup.find_all('img')
link = []
#get all links
for i in a:
link.append(i.attrs['src'])
return link
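# Note: the src attributes collected above may be relative paths; a helper like the one
# below (hypothetical, not part of the original script) could resolve them against the
# page URL with urllib.parse.urljoin before downloading.
def resolve_img_urls(page_url, links):
    from urllib.parse import urljoin
    # relative src values become absolute; already-absolute URLs pass through unchanged
    return [urljoin(page_url, link) for link in links]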
def main():
n = 1
url = input("please input a url of web:")
url_link = get_img_url(url)
for i in url_link:
file_name = "pic{}.jfif".format(i)
scrapy_photo(i,file_name)
n = n + 1
if __name__ == "__main__":
main()
|
nilq/baby-python
|
python
|
"""\
wxDatePickerCtrl objects
@copyright: 2002-2007 Alberto Griggio
@copyright: 2014-2016 Carsten Grohmann
@copyright: 2016 Dietmar Schwertberger
@license: MIT (see LICENSE.txt) - THIS PROGRAM COMES WITH NO WARRANTY
"""
import wx
from edit_windows import ManagedBase, EditStylesMixin
from tree import Node
import common, compat, config
import decorators
if compat.IS_PHOENIX:
#import wx.adv
from wx.adv import DatePickerCtrl
else:
#import wx.calendar
from wx import DatePickerCtrl
class EditDatePickerCtrl(ManagedBase, EditStylesMixin):
"Class to handle wxDatePickerCtrl objects"
# XXX unify with EditCalendarCtrl?
_PROPERTIES = ["Widget", "style"]
PROPERTIES = ManagedBase.PROPERTIES + _PROPERTIES + ManagedBase.EXTRA_PROPERTIES
def __init__(self, name, parent, id, sizer, pos):
# Initialise parent classes
ManagedBase.__init__(self, name, 'wxDatePickerCtrl', parent, id, sizer, pos)
EditStylesMixin.__init__(self)
def create_widget(self):
# TODO add all the other parameters for the DatePickerCtrl initial date
self.widget = DatePickerCtrl(self.parent.widget, self.id, style=self.style)
# handle compatibility:
@decorators.memoize
def wxname2attr(self, name):
cn = self.codegen.get_class(self.codegen.cn(name))
module = wx if compat.IS_CLASSIC else wx.adv
return getattr(module, cn)
def properties_changed(self, modified=None):
EditStylesMixin.properties_changed(self, modified)
ManagedBase.properties_changed(self, modified)
def builder(parent, sizer, pos, number=[1]):
"factory function for EditDatePickerCtrl objects"
label = 'datepicker_ctrl_%d' % number[0]
while common.app_tree.has_name(label):
number[0] += 1
label = 'datepicker_ctrl_%d' % number[0]
with parent.frozen():
datepicker_ctrl = EditDatePickerCtrl(label, parent, wx.NewId(), sizer, pos)
datepicker_ctrl.properties["style"].set_to_default()
datepicker_ctrl.check_defaults()
node = Node(datepicker_ctrl)
datepicker_ctrl.node = node
if parent.widget: datepicker_ctrl.create()
common.app_tree.insert(node, sizer.node, pos-1)
def xml_builder(attrs, parent, sizer, sizeritem, pos=None):
"factory to build EditDatePickerCtrl objects from a XML file"
from xml_parse import XmlParsingError
try:
label = attrs['name']
except KeyError:
raise XmlParsingError(_("'name' attribute missing"))
if sizer is None or sizeritem is None:
raise XmlParsingError(_("sizer or sizeritem object cannot be None"))
datepicker_ctrl = EditDatePickerCtrl(label, parent, wx.NewId(), sizer, pos)
#sizer.set_item(datepicker_ctrl.pos, proportion=sizeritem.proportion, span=sizeritem.span, flag=sizeritem.flag, border=sizeritem.border)
node = Node(datepicker_ctrl)
datepicker_ctrl.node = node
if pos is None:
common.app_tree.add(node, sizer.node)
else:
common.app_tree.insert(node, sizer.node, pos-1)
return datepicker_ctrl
def initialize():
"initialization function for the module: returns a wxBitmapButton to be added to the main palette"
common.widgets['EditDatePickerCtrl'] = builder
common.widgets_from_xml['EditDatePickerCtrl'] = xml_builder
return common.make_object_button('EditDatePickerCtrl', 'datepicker_ctrl.xpm')
|
nilq/baby-python
|
python
|
from util.html import HTML
import numpy as np
import os
import ntpath
import time
from . import util
import matplotlib.pyplot as plt
from util.util import load_validation_from_file, smooth_kernel, load_loss_from_file
class Visualizer():
"""This class includes several functions that can display/save images and print/save logging information.
It uses a Python library 'visdom' for display, and a Python library 'dominate' (wrapped in 'HTML') for creating HTML files with images.
"""
def __init__(self, opt):
"""Initialize the Visualizer class
Parameters:
opt -- stores all the experiment flags; needs to be a subclass of BaseOptions
Step 1: Cache the training/test options
Step 2: connect to a visdom server
Step 3: create an HTML object for saveing HTML filters
Step 4: create a logging file to store training losses
"""
self.opt = opt # cache the option
self.name = opt.name
if opt.isTrain:
# create a logging file to store training losses
self.loss_log = os.path.join(opt.checkpoints_dir, opt.name, 'loss_log.txt')
self.validation_log = os.path.join(opt.checkpoints_dir, opt.name, 'validation.txt')
self.training_log = os.path.join(opt.checkpoints_dir, opt.name, 'training.txt')
if opt.continue_train:
if os.path.isfile(self.loss_log):
self.plot_data = load_loss_from_file(self.loss_log)
if len(self.plot_data['legend']) == 0:
del self.plot_data
print('Loaded loss from', self.loss_log)
if os.path.isfile(self.validation_log):
self.validation_score = load_validation_from_file(self.validation_log)
print('Loaded validation scores from', self.validation_log)
if os.path.isfile(self.training_log):
                    self.training_score = load_validation_from_file(self.training_log)
print('Loaded training scores from', self.training_log)
elif os.path.isfile(self.loss_log):
# Erase old content
open(self.loss_log, 'w').close()
open(self.validation_log, 'w').close()
open(self.training_log, 'w').close()
with open(self.loss_log, "a") as log_file:
now = time.strftime("%c")
log_file.write('================ Training Loss (%s) ================\n' % now)
    def plot_current_losses(self):
        """Plot the losses accumulated in self.plot_data and save the figure as loss.png
        in the checkpoints directory. Takes no arguments; the data is filled in by
        print_current_losses().
        """
if not hasattr(self, 'figure'):
self.figure = plt.figure()
else:
plt.figure(self.figure.number)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title(self.name + ' loss over time')
# plt.yscale('symlog')
# plt.ylim((-50,80))
x = self.plot_data['X']
y = np.array(self.plot_data['Y']).transpose()
for i, loss in enumerate(y):
if i>=3:
break
plt.plot(x, loss, label=self.plot_data['legend'][i])
plt.legend()
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'loss.png')
plt.tight_layout()
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
def plot_current_validation_score(self, score, total_iters):
with open(self.validation_log, 'a') as f:
f.write(', '.join(map(str, score))+'\n')
if not hasattr(self, 'validation_score'):
self.validation_score = []
self.validation_score.append(score)
if not hasattr(self, 'figure2'):
self.figure2 = plt.figure()
else:
plt.figure(self.figure2.number)
plt.xlabel('Iteration')
plt.ylabel('Mean Relative Error')
plt.title(self.name + ' validation error over time')
plt.ylim([0,max(1,np.amax(self.validation_score))])
step_size = int(total_iters/len(self.validation_score))
x = list(range(step_size, total_iters+1, step_size))
plt.plot(x, [0.15]*len(x), 'r--')
for i in range(len(score)):
plt.plot(x, np.array(self.validation_score)[:,i])
plt.legend(('15% error mark', *self.opt.physics_model.get_label_names()))
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'validation_score.png')
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
def plot_current_training_score(self, score, total_iters):
with open(self.training_log, 'a') as f:
f.write(', '.join(map(str, score))+'\n')
if not hasattr(self, 'training_score'):
self.training_score = []
self.training_score.append(score)
if not hasattr(self, 'figure2'):
self.figure2 = plt.figure()
else:
plt.figure(self.figure2.number)
plt.xlabel('Iteration')
plt.ylabel('Mean Relative Error')
plt.title(self.name + ' training error over time')
plt.ylim([0,max(1,np.amax(self.training_score))])
step_size = int(total_iters/len(self.training_score))
x = list(range(step_size, total_iters+1, step_size))
plt.plot(x, [0.15]*len(x), 'r--')
for i in range(len(score)):
plt.plot(x, np.array(self.training_score)[:,i])
plt.legend(('15% error mark', *self.opt.physics_model.get_label_names()))
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'training_score.png')
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
# losses: same format as |losses| of plot_current_losses
def print_current_losses(self, epoch, iters, losses, t_comp, t_data, iter):
"""print current losses on console; also save the losses to the disk
Parameters:
epoch (int) -- current epoch
iters (int) -- current training iteration during this epoch (reset to 0 at the end of every epoch)
losses (OrderedDict) -- training losses stored in the format of (name, float) pairs
t_comp (float) -- computational time per data point (normalized by batch_size)
t_data (float) -- data loading time per data point (normalized by batch_size)
"""
if not hasattr(self, 'plot_data'):
self.plot_data = {'X': [], 'Y': [], 'legend': list(losses.keys())}
self.plot_data['X'].append(iter)
self.plot_data['Y'].append([losses[k].detach().cpu().numpy() for k in self.plot_data['legend']])
message = '(epoch: %d, iters: %d, time: %.3f, data: %.3f) ' % (epoch, iters, t_comp, t_data)
for k, v in losses.items():
message += '%s: %.3f ' % (k, v)
print(message) # print the message
with open(self.loss_log, "a") as log_file:
log_file.write('%s\n' % message) # save the message
def save_smooth_loss(self):
"""Stores the current loss as a png image.
"""
num_points = len(self.plot_data['Y'])
if not hasattr(self, 'figure'):
self.figure = plt.figure()
else:
plt.figure(self.figure.number)
plt.xlabel('Iterations')
plt.ylabel('Loss')
plt.title(self.name + ' loss over time')
# plt.yscale('symlog')
# plt.ylim((-50,80))
x = self.plot_data['X']
y_all = np.array(self.plot_data['Y']).transpose()
y = []
for y_i in y_all:
y.append(smooth_kernel(y_i))
x = np.linspace(x[0],x[-1],len(y[0]))
for i, loss in enumerate(y):
plt.plot(x, loss, label=self.plot_data['legend'][i])
plt.legend()
path = os.path.join(self.opt.checkpoints_dir, self.opt.name, 'loss_smooth.png')
plt.savefig(path, format='png', bbox_inches='tight')
plt.cla()
def save_images(webpage: HTML, visuals: dict, image_path: list, aspect_ratio=1.0, width=256):
"""Save images to the disk.
Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
visuals (OrderedDict) -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
        image_path (list)        -- the first element is used to create the image names
aspect_ratio (float) -- the aspect ratio of saved images
width (int) -- the images will be resized to width x width
This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
"""
image_dir = webpage.get_image_dir()
short_path = ntpath.basename(image_path[0])
name = os.path.splitext(short_path)[0]
webpage.add_header(name)
ims, txts, links = [], [], []
for label, im in visuals.items():
if im is None:
continue
image_name = '%s_%s.png' % (name, label)
save_path = os.path.join(image_dir, image_name)
util.save_image(im, save_path, aspect_ratio=aspect_ratio)
ims.append(image_name)
txts.append(label)
links.append(image_name)
webpage.add_images(ims, txts, links, width=width)
|
nilq/baby-python
|
python
|
# Copyright 2009-2010 by Ka-Ping Yee
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
NOTE: THIS MODULE IS CURRENTLY UNUSED.
The current permissions scheme for resource finder is:
- Anyone (logged-in and non-logged-in users) can view and print
- Any logged-in user can edit data
THE CODE BELOW IS UNNECESSARY WITH THIS PERMISSION SCHEME
Handler for allowing an Account with 'grant' permission to grant access using
the permission scheme provided in access.py
"""
import logging
import model
import utils
from utils import DateTime, ErrorMessage, Redirect
from utils import db, html_escape, users, _
from access import check_action_permitted
class GrantAccess(utils.Handler):
def get(self):
"""Shows all access requests that are waiting for approval."""
self.require_action_permitted('grant')
q = model.Account.all().filter('requested_actions !=', None)
requests = []
for account in q.fetch(100):
for action in account.requested_actions:
if check_action_permitted(self.account, 'grant'):
requests.append({'email': account.email,
'requested_action': action,
'key': account.key()})
self.render('templates/grant_access.html',
requests=requests,
params=self.params,
grant_url=self.get_url('/grant_access'),
logout_url=users.create_logout_url('/'),
subdomain=self.subdomain)
def post(self):
"""Grants or denies a single request."""
action = self.request.get('action')
if not action:
raise ErrorMessage(404, 'missing action (requested_action) params')
self.require_action_permitted('grant')
account = model.Account.get(self.request.get('key'))
if not account:
raise ErrorMessage(404, 'bad key given')
#TODO(eyalf): define account.display_name() or something
name = account.email
if not action in account.requested_actions:
#i18n: Error message
            raise ErrorMessage(404, _('No pending request for '
                                      '%(account_action)s by %(user)s')
                               % {'account_action': action, 'user': name})
account.requested_actions.remove(action)
grant = self.request.get('grant', 'deny')
if grant == 'approve':
account.actions.append(action)
account.put()
logging.info('%s request for %s was %s' % (account.email,
action,
grant))
if self.params.embed:
if grant == 'approve':
                self.write(
                    #i18n: Application for the given permission action approved
                    _('Request for becoming %(action)s was approved.')
                    % {'action': action})
else:
                self.write(
                    #i18n: Application for the given permission action denied
                    _('Request for becoming %(action)s was denied.')
                    % {'action': action})
else:
raise Redirect(self.get_url('/grant_access'))
if __name__ == '__main__':
utils.run([('/grant_access', GrantAccess)], debug=True)
|
nilq/baby-python
|
python
|
import pytest
import NAME
|
nilq/baby-python
|
python
|
import os
import numpy as np
import joblib
class proba_model_manager():
    def __init__(self, static_data, params={}):
        # set the attributes that later statements rely on before they are used
        self.static_data = static_data
        self.cluster_name = static_data['_id']
        self.rated = static_data['rated']
        self.probabilistic = True
        self.istrained = False
        self.method = 'mlp'
        self.model_dir = os.path.join(static_data['path_model'], 'Probabilistic')
        self.data_dir = self.static_data['path_data']
        if len(params) > 0:
            self.params = params
            self.test = params['test']
            self.test_dir = os.path.join(self.model_dir, 'test_' + str(self.test))
        if hasattr(self, 'test'):
            try:
                self.load(self.test_dir)
            except:
                pass
        else:
            try:
                self.load(self.model_dir)
            except:
                pass
        if not os.path.exists(self.model_dir):
            os.makedirs(self.model_dir)
        if hasattr(self, 'test_dir') and not os.path.exists(self.test_dir):
            os.makedirs(self.test_dir)
def load(self, path):
if os.path.exists(os.path.join(path, self.method + '.pickle')):
try:
tmp_dict = joblib.load(os.path.join(path, self.method + '.pickle'))
self.__dict__.update(tmp_dict)
except:
                raise ImportError('Cannot open the probabilistic model')
else:
            raise ImportError('Cannot find the probabilistic model')
|
nilq/baby-python
|
python
|
"""Test the cli_data_download tool outputs."""
# TODO review and edit this
import argparse
from pathlib import Path
from cmatools.cli_data_download import cli_data_download
from cmatools.definitions import SRC_DIR
DEBUG = True
"""bool: Debugging module-level constant (Default: True)."""
# Define cli filepath
CLI = Path(SRC_DIR, "cmatools", "cli_simple_analysis.py")
"""str: Filepath to command line tool module."""
# TODO mark as slow
# Keep this simple test, but mock so no actual download occurs
def test_cli_data_download():
"""Test for cli_data_download() function."""
parsed_args = argparse.Namespace(portal="CEDA", dataset="HADCRUT")
output = cli_data_download(parsed_args)
# Expect True, indicates download success
assert output is True
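# One way to honour the "mock so no actual download occurs" TODO above, kept as a sketch in
# comments because the patch target assumes cli_data_download() reaches the network through
# requests, which is an assumption about its implementation rather than something shown here:
#
#   from unittest import mock
#
#   def test_cli_data_download_mocked():
#       parsed_args = argparse.Namespace(portal="CEDA", dataset="HADCRUT")
#       with mock.patch("cmatools.cli_data_download.requests.get") as fake_get:
#           fake_get.return_value.status_code = 200
#           assert cli_data_download(parsed_args) is True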
|
nilq/baby-python
|
python
|
import sys, json
from PIL import Image
from parser.png_diff import PNG_DIFF
from format.util import *
def diff(file_before, file_after):
"""diff png file
args:
file_before (str)
file_after (str)
returns:
png_diff (PNG_DIFF)
"""
png_before = Image.open(file_before)
png_after = Image.open(file_after)
png_diff = PNG_DIFF()
png_diff.diff(png_before, png_after)
return png_diff
def make_diff(file_before, file_after, file_output_name):
"""diff png file and save as file
args:
file_before (str)
file_after (str)
file_output_name (str)
returns:
saved_files (list)
"""
png_diff = diff(file_before, file_after)
saved_diff_images = create_diff_image("RGBA", tuple(png_diff.size[0]), png_diff.pixel_diff, file_output_name)
saved_diff_json = create_diff_json(png_diff, file_output_name)
saved_files = saved_diff_images
saved_files.append(saved_diff_json)
return saved_files
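# sys and json are imported above but never used; a minimal command-line entry point such as
# this sketch (hypothetical, not part of the original module) would put them to work:
if __name__ == "__main__":
    # usage: python <this module> before.png after.png output_name
    before, after, output_name = sys.argv[1], sys.argv[2], sys.argv[3]
    print(json.dumps(make_diff(before, after, output_name)))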
|
nilq/baby-python
|
python
|
# -*- coding: utf-8 -*-
# Check if the move of v can satisfied, makebetter, or notsatisfied
from .FMConstrMgr import FMConstrMgr
class FMBiConstrMgr(FMConstrMgr):
def select_togo(self):
"""[summary]
Returns:
dtype: description
"""
return 0 if self.diff[0] < self.diff[1] else 1
|
nilq/baby-python
|
python
|
from django.shortcuts import get_object_or_404, render
from .models import Card, Group, Product
def searching(request, keyword):
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
def index(request):
offers = Product.objects.filter(old_price__gte=1)[:3]
products = Product.objects.all()[:3]
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'offers': offers,
'products': products,
'keyword': keyword
}
return render(request, 'main/index.html', context)
def catalog(request):
offers = Product.objects.filter(old_price__gte=1)
products = Product.objects.all().order_by('price')
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'offers': offers,
'products': products,
'keyword': keyword
}
return render(request, 'main/catalog.html', context)
def group_list(request, slug):
group = get_object_or_404(Group, slug=slug)
products = group.products.all()
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'group': group,
'products': products,
'keyword': keyword
}
return render(request, 'main/group.html', context)
def cart(request):
products = Product.objects.all()[:10]
cards = Card.objects.all()
keyword = request.GET.get("q", None)
if keyword:
products = Product.objects.filter(title__contains=keyword)
return render(request, 'main/catalog.html', {'products': products, 'keyword': keyword})
context = {
'products': products,
'cards': cards,
'keyword': keyword
}
return render(request, 'main/cart.html', context)
|
nilq/baby-python
|
python
|
#
# project-k Forth kernel in python
# Use the same kernel code for all applications.
# FigTaiwan H.C. Chen hcchen5600@gmail.com 21:14 2017-07-31
#
import re, sys
name = "peforth"
vm = __import__(__name__)
major_version = 1; # major version, peforth.py kernel version, integer.
ip = 0;
stack = [] ;
rstack = [];
vocs = [];
words = {};
current = "forth";
context = "forth";
order = [context];
wordhash = {};
dictionary = [];
dictionary.append(0);
here = 1; # dictionary[0] is 0
tib = "";
ntib = 0;
RET = None; # The 'ret' instruction code. It marks the end of a colon word.
EXIT = ""; # The 'exit' instruction code.
compiling = False;
stop = False; # Stop the outer loop
newname = ""; # new word's name
newxt = None
newhelp = "";
# Reset the forth VM
def reset():
# defined in project-k kernel peforth.py
    global rstack, compiling, ip, stop, ntib
# rstack = []; this creates extra error when return from the inner loop
compiling = False;
ip = 0; # forth VM instruction pointer
stop = True;
ntib = len(tib); # don't clear tib, a clue for debug.
# All peforth words are instances of this Word() constructor.
class Word:
def __init__(self, name, xt):
self.name = name
self.xt = xt
self.immediate = False
self.help = ""
self.comment = ""
def __str__(self): # return help message
return self.name + " " + self.help + ' __str__'
def __repr__(self): # execute xt and return help message
return "<Word '{}'>".format(self.name)
# returns the last defined word.
def last():
return words[current][-1]
# Get the word-list where new defined words are going to
def current_word_list():
return words[current]
# Get the word-list that is searched first.
def context_word_list():
return words[context]
# Get string from recent ntib down to, but not including, the next delimiter.
# Return result={str:"string", flag:boolean}
# If delimiter is not found then return the entire remaining TIB, multiple-lines,
# through result.str; the purpose is to preserve as much of the input as possible.
# result.flag indicates delimiter found or not found.
# o If you want to read the entire line in TIB, use nexttoken('\n|\r').
# nexttoken() skip the next character which is usually white space in Forth source code,
# e.g. s", this is reasonable because it's Forth. While the leading white space(s)
# will be included if using the lower level nextstring('\\s') instead of nexttoken().
# o If you need to know whether the delimiter is found, use nextstring().
# o result.str is "" if TIB has nothing left.
# o The ending delimiter is remained.
# o The delimiter is a regular expression.
def nextstring(deli):
# search for delimiter in tib from ntib
# index = tib[ntib:].find(deli) does not support regular expression, no good
global ntib
result = {}
try:
index = re.search(deli, tib[ntib:]).start() # start() triggers exception when not found
# see https://stackoverflow.com/questions/2674391/python-locating-the-position-of-a-regex-match-in-a-string
result['str'] = tib[ntib:ntib+index]; # found, index is the length
result['flag'] = True;
ntib += index; # Now ntib points at the delimiter.
except Exception:
result['str'] = tib[ntib:] # get the tib from ntib to EOL
result['flag'] = False;
ntib = len(tib) # skip to EOL
return result;
# Get next token which is found after the recent ntib of TIB.
# If delimiter is RegEx white-space ('\\s') or absent then skip all leading white spaces first.
# Usual case, skip the next character which should be a white space for Forth.
# But if delimiter is CRLF, which is to read the entire line, for blank lines the ending CRLF won't be skipped.
# o Return "" if TIB has nothing left.
# o Return the remaining TIB if delimiter is not found.
# o The ending delimiter is remained.
# o The delimiter is a regular expression.
def nexttoken(deli='\\s'):
global tib, ntib
if ntib >= len(tib): return ""
if deli == '\\s':
# skip all leading white spaces
while tib[ntib] in [" ","\t","\n","\r"]:
if (ntib+1) < len(tib):
ntib += 1
else:
break
elif deli in ['\\n','\n','\\r','\r','\\n|\\r','\n|\r','\\r|\\n', '\r|\n']:
# skip the next character that must be whitespace
if tib[ntib] not in ['\n','\r']:
# But don't skip the EOL itself!
ntib += 1
else:
# skip next character that must be whitespace
ntib += 1
token = nextstring(deli)['str'];
return token;
# tick() is the same thing as the forth word ' (tick).
# Letting words[voc][0]=0 also means that a tick() return value of 0 indicates "not found".
# Return the word obj of the given name or 0 if the word is not found.
# May be redefined for selftest to detect private words referenced by name.
# vm.tick keeps the original version.
def tick(name):
# defined in project-k peforth.py
if name in wordhash.keys():
return wordhash[name]
else:
return 0 # name not found
# Return a boolean.
# Is the new word reDef depends on only the words[current] word-list, not all
# word-lists, nor the word-hash table. Can't use tick() because tick() searches
# the word-hash that includes not only the words[current] word-list.
def isReDef(name):
result = False;
wordlist = current_word_list();
for i in range(1,len(wordlist)): # skip [0] which is 0
if wordlist[i].name == name :
result = True;
break;
return result;
# comma(x) compiles anything into dictionary[here]. x can be number, string,
# function, object, array, etc.
# To compile a word, comma(tick('word-name'))
def comma(x):
global dictionary, here
try:
dictionary[here], here = x , here + 1
except:
dictionary.append(x)
here += 1
# dummy RET
try:
dictionary[here] = RET
except:
dictionary.append(RET)
# [here] will be overwritten, we do this dummy because
# RET is the ending mark for 'see' to know where to stop.
'''
Discussions:
'address' or 'ip' are index of dictionary[] array. dictionary[] is the memory of the
Forth virtual machine.
execute() executes a function, a word "name", and a word Object.
inner(entry) jumps into the entry address. The TOS of return stack can be 0, in that
case the control will return back to python host, or the return address.
inner() used in outer(), and colon word's xt() while execute() is used everywhere.
We have 3 ways to call forth words from Python: 1. execute('word'),
2. dictate('word word word'), and 3. inner(cfa).
dictate() cycles are stand alone tasks. We can suspend an in-completed dictate() and we
can also run another dictate() within a dictate().
The ultimate inner loop is like this: while(w){ip++; w.xt(); w=dictionary[ip]};
Boolean(w) == False is the break condition. So I choose None to be the RET instruction
and the empty string "" to be the EXIT instruction. Choices are None, "", [], {}, False,
    and 0, while 0 means 'suspend the inner loop'.
    To suspend the Forth virtual machine means to stop the inner loop without popping the
    return stack; resuming is possible because the return stack is preserved. We need an
    instruction to do this and it's 0. dictionary[0] and words[<vid>][0] are always 0, thus
    ip=w=0 indicates that case. Calling the inner loop from the outer loop needs a push(0)
    first, both to balance the return stack and to let the 0 instruction stop popping it,
    because there is no more return address: the caller is the outer interpreter.
'''
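# A minimal usage sketch of the three calling styles discussed above (assumes the kernel has
# been bootstrapped by its accompanying Forth source so that ordinary words exist; that
# source is not part of this file):
#
#   import peforth
#   peforth.dictate("1 2 +")     # 1. run a command line through the outer interpreter
#   print(peforth.pop())         # results are left on the data stack -> 3
#   peforth.execute("words")     # 2. execute a single word by name
#   peforth.inner(cfa)           # 3. jump into a colon word body at dictionary address cfa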
# -------------------- ###### The inner loop ###### -------------------------------------
# Translate all possible entry or input to the suitable word type.
def phaseA (entry):
global ip
w = 0;
if type(entry)==str:
# "string" is word name
w = tick(entry.strip()); # remove leading and tailing white spaces
elif (type(entry)==Word or callable(entry)) : # function, Word
w = entry;
elif type(entry)==int:
# number could be dictionary entry or 0.
# could be does> branch entry or popped from return stack by RET or EXIT instruction.
ip = entry;
w = dictionary[ip];
else:
panic("Error! execute() doesn't know how to handle this thing : "+entry+" ("+type(entry)+")\n","err");
return w;
# Execute the given w by the correct method
def phaseB(w):
global ip, rstack
if type(w)==Word: # Word object
try:
w.xt(w)
except Exception as err:
panic("Word in phaseB {}: {}\nBody:\n{}".format(repr(w),err,w.xt.__doc__))
elif callable(w) : # a function
try:
w();
except Exception as err:
panic("Callable in phaseB {}: {}\nBody:\n{}".format(repr(w),err,w.__doc__))
elif str(type(w))=="<class 'code'>": # code object
exec(w)
elif type(w)==int:
# Usually a number is the entry of does>. Can't use inner() to call it
# The below push-jump mimics the call instruction of a CPU.
rstack.append(ip); # Forth ip is the "next" instruction to be executed. Push return address.
ip = w; # jump
else:
panic("Error! don't know how to execute : "+w+" ("+type(w)+")\n","error");
# execute("unknown") == do nothing, this is beneficial when executing a future word
# May be redefined for selftest to detect private words called by name.
# vm.execute keeps the original version.
def execute(entry):
# defined in proejct-k peforth.py
w = phaseA(entry)
if w:
if type(w) in [int, float]:
panic("Error! please use inner("+w+") instead of execute("+w+").\n","severe");
else:
phaseB(w);
return(vm) # support function cascade
else:
panic(entry + " unknown!")
# FORTH inner loop of project-k VM
def inner(entry, resuming=None):
# defined in project-k kernel peforth.py
global ip
w = phaseA(entry);
while not stop:
while w: # this is the very inner loop
ip += 1 # Forth general rule. IP points to the *next* word.
phaseB(w) # execute it
w = dictionary[ip] # get next word
if (w==0):
break; # w==0 is suspend, break inner loop but reserve rstack.
else:
ip = rstack.pop(); # w is either ret(None) or exit(""), return to caller, or 0 when resuming through outer(entry)
if(resuming):
w = dictionary[ip]; # Higher level of inner()'s have been terminated by suspend, do their job.
if not (ip and resuming):
            break  # Resuming the inner loop; ip==0 means the resume is done.
### End of the inner loop ###
# FORTH outer loop of project-k VM
# If entry is given then resume from the entry point by executing
# the remaining colon thread down until ip reaches 0, that's resume.
# Then proceed with the tib/ntib string.
def outer(entry=None):
# Handle one token.
def outerExecute(token):
w = tick(token); # not found is 0. w is an Word object.
if (w) :
if(not compiling): # interpret state or immediate words
if getattr(w,'compileonly',False):
panic(
"Error! "+token+" is compile-only.",
len(tib)-ntib>100 # error or warning? depends
);
return;
execute(w);
else: # compile state
if (w.immediate) :
execute(w); # Not inner(w);
else:
if getattr(w,'interpretonly',False):
panic(
"Error! "+token+" is interpret-only.",
len(tib)-ntib>100 # error or warning? depends
);
return;
comma(w); # compile w into dictionary. w is a Word() object
else:
# token is unknown or number
            # Note: writing f = float(token) directly caused problems that try-except could not catch
def is_number(s):
# https://stackoverflow.com/questions/354038/how-do-i-check-if-a-string-is-a-number-float
try:
complex(s) # for int, float and complex
except ValueError:
return False
return True
n = None #
if is_number(token):
# token is (int, float, complex) we ignore complex so far
f = complex(token).real
i = int(f)
if i==f:
n = i
else:
n = f
else:
# token is unknown or (hex, oct, binary)
def panic_unknown():
panic(
"Error! "+token+" unknown.\n",
len(tib)-ntib>100 # error or warning? depends
);
try:
# token is a number
if token[:2] in ["0x","0X"]:
n = int(token,base=16)
elif token[:2] in ["0o","0O"]:
n = int(token,base=8)
elif token[:2] in ["0b","0B"]:
n = int(token,base=2)
else:
if not push(token).execute("unknown").pop():
panic_unknown()
except Exception as err:
if not push(token).execute("unknown").pop():
panic_unknown()
if n != None :
push(n)
if (compiling):
execute("literal");
if (entry):
inner(entry, True); # resume from the breakpoint
while(not stop):
token = nexttoken();
if (token==""):
break; # TIB done, loop exit.
outerExecute(token);
### End of the outer loop ###
# Generates the .xt() function of all code words.
# Python does not support annonymous function so we use genxt() instead.
# _me argument refers to the word object itself, if you need to access
# any attribute of the word.
# xt.__doc__ keeps the source code.
# py: help(genxt) to read me.
def genxt(name, body):
ll = {}
# _me will be the code word object itself.
source = "def xt(_me=None): ### {} ###"
if tick('-indent') and tick('indent'):
# Beautify source code if -indent and indent are defined
push(body);execute('-indent');execute('indent')
body = pop()
if body.strip()=="":
source = (source+"\n pass\n").format(name)
else:
source = (source+'\n{}').format(name,body)
try:
exec(source,globals(),ll)
except Exception as err:
panic("Failed in genxt({},Body) : {}\nBody:\n{}".format(name, err, body))
ll['xt'].__doc__ = source
ll['xt'].name = name
return ll['xt']
# Python does not support anonymous functions; this can be worked around by
# using a closure. genfunc("body","args","name") returns a function which
# is composed by the given function name, source code and arguments.
# <name>.__doc__ keeps the source code.
# py: help(genfunc) to read me.
def genfunc(body,args,name):
local = {}
source = "def {}({}):".format(name,args)
# args can be "", or 'x, y=123,z=None'
if body.strip()=="":
source = source+"\n pass\n";
else:
source = (source+'\n{}').format(body)
exec(source,globals(),local)
local[name].__doc__ = source
return local[name]
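# A quick illustration of genfunc() with made-up values (the body string must already carry
# the indentation it would have inside a def block):
#
#   adder = genfunc("    return x + y", "x, y", "adder")
#   adder(1, 2)            # -> 3
#   print(adder.__doc__)   # shows the generated source text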
# The basic FORTH word 'code's run time.
def docode(_me=None):
# All future code words can see local variables in here, for jeforth.3we.
# [x] check if this is true for python, <== Not True for Python.
global compiling, newname, newxt, newhelp, ntib
newname = nexttoken();
if isReDef(newname): # don't use tick(newname), it's wrong.
print("reDef " + newname);
# get code body
push(nextstring("end-code"));
if tos()['flag']:
compiling = "code"; # it's true and a clue of compiling a code word.
newxt = genxt(newname, pop()['str'])
else:
panic("Error! expecting 'end-code'.");
reset();
code = Word('code', docode)
code.vid = 'forth'
code.wid = 1
code.type = 'code'
code.help = '( <name> -- ) Start composing a code word.'
# The basic FORTH word 'end-code's run time.
def doendcode(_me=None):
global compiling
if compiling!="code":
panic("Error! 'end-code' a none code word.")
current_word_list().append(Word(newname,newxt))
last().vid = current;
last().wid = len(current_word_list())-1;
last().type = 'code';
# ---------
mm = re.match(r"^.*?#\s*(.*)$", last().xt.__doc__.split('\n')[1])
last().help = mm.groups()[0] if mm and mm.groups()[0] else ""
# ---------
wordhash[last().name] = last();
compiling = False;
endcode = Word('end-code', doendcode)
endcode.vid = 'forth'
endcode.wid = 2
endcode.type = 'code'
endcode.immediate = True
endcode.compileonly = True
endcode.help = '( -- ) Wrap up the new code word.'
# forth master word-list
# Letting current_word_list()[0] == 0 has many advantages. When tick('name')
# returns a 0, current_word_list()[0] is 0 too, indicates a not-found.
words[current] = [0,code,endcode]
# Find a word as soon as possible.
wordhash = {"code":current_word_list()[1], "end-code":current_word_list()[2]};
# Command interface to the project-k VM.
# The input can be multiple lines or an entire ~.f file.
# Yet it usually is the TIB (Terminal input buffer).
def dictate(input):
global tib, ntib, ip, stop
tibwas = tib
ntibwas = ntib
ipwas = ip
tib = input;
ntib = 0;
stop = False; # stop outer loop
outer();
tib = tibwas;
ntib = ntibwas;
ip = ipwas;
return(vm) # support function cascade
# -------------------- end of main() -----------------------------------------
# Top of Stack access easier. ( tos(2) tos(1) tos(void|0) -- ditto )
# tos(i,new) returns tos(i) and by the way change tos(i) to new value this is good
# for counting up or down in a loop.
def tos(index=None,value=None):
global stack
if index==None:
return stack[-1]
elif value==None:
return stack[len(stack)-1-index];
else:
data = stack[len(stack)-1-index];
stack[len(stack)-1-index] = value;
return(data);
# Top of return Stack access easier. ( rtos(2) rtos(1) rtos(void|0) -- ditto )
# rtos(i,new) returns rtos(i) and by the way change rtos(i) to new value this is good
# for counting up or down in a loop.
def rtos(index=None,value=None):
global rstack
if index==None:
return rstack[-1]
elif value==None:
return rstack[len(rstack)-1-index];
else:
data = rstack[len(rstack)-1-index];
rstack[len(rstack)-1-index] = value;
return(data);
# rstack access easier. e.g. rpop(1) gets rtos(1)
# ( rtos(2) rtos(1) rtos(0) -- rtos(2) rtos(0) )
# push(formula(rpop(i)),i-1) manipulates the rtos(i) directly, usually when i is the index
# of a loop.
def rpop(index=None):
if index==None:
return rstack.pop();
else:
return rstack.pop(len(rstack)-1-index);
# Stack access easier. e.g. pop(1) gets tos(1) ( tos(2) tos(1) tos(0) -- tos(2) tos(0) )
# push(formula(pop(i)),i-1) manipulate the tos(i) directly, when i is the index of a loop.
def pop(index=None):
if index==None:
return stack.pop();
else:
return stack.pop(len(stack)-1-index);
# Stack access easier. e.g. push(data,1) inserts data to tos(1),
# ( tos2 tos1 tos -- tos2 tos1 data tos )
# push(formula(pop(i)),i-1) manipulate the tos(i) directly, usually when i
# is the index of a loop.
def push(data=None, index=None):
global stack
if index==None:
stack.append(data);
else:
stack.insert(len(stack)-1-index,data);
return(vm) # support function cascade
# ---- end of projectk.py ----
|
nilq/baby-python
|
python
|
from wallet import Wallet
wallet = Wallet()
address = wallet.getnewaddress()
print(address)
|
nilq/baby-python
|
python
|
from config.settings_base import *
##### EDIT BELOW
API_KEY = "Paste your key in between these quotation marks"
|
nilq/baby-python
|
python
|
from rest_framework import serializers
from .models import Homework
class HomeworkStudentSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
course = serializers.ReadOnlyField(source='course.id')
lecture = serializers.ReadOnlyField(source='lecture.id')
grade = serializers.ReadOnlyField(read_only=True)
comments = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Homework
fields = ['id', 'created', 'owner', 'course', 'lecture', 'hometask', 'url', 'grade', 'comments']
class HomeworkTeacherSerializer(serializers.ModelSerializer):
owner = serializers.ReadOnlyField(source='owner.username')
course = serializers.ReadOnlyField(source='course.id')
lecture = serializers.ReadOnlyField(source='lecture.id')
hometask = serializers.PrimaryKeyRelatedField(read_only=True)
url = serializers.ReadOnlyField(read_only=True)
comments = serializers.PrimaryKeyRelatedField(many=True, read_only=True)
class Meta:
model = Homework
fields = ['id', 'created', 'owner', 'course', 'lecture', 'hometask', 'url', 'grade', 'comments']
|
nilq/baby-python
|
python
|
#!/usr/bin/env python
'''
Copyright (C) 2019, WAFW00F Developers.
See the LICENSE file for copying permission.
'''
NAME = 'Open-Resty Lua Nginx (FLOSS)'
def is_waf(self):
schema1 = [
self.matchHeader(('Server', r'^openresty/[0-9\.]+?')),
self.matchStatus(403)
]
schema2 = [
self.matchContent(r'openresty/[0-9\.]+?'),
self.matchStatus(406)
]
if all(i for i in schema1):
return True
if all(i for i in schema2):
return True
return False
|
nilq/baby-python
|
python
|
from pypy.rlib import _rffi_stacklet as _c
from pypy.rlib import objectmodel, debug
from pypy.rpython.annlowlevel import llhelper
from pypy.tool.staticmethods import StaticMethods
class StackletGcRootFinder:
__metaclass__ = StaticMethods
def new(thrd, callback, arg):
h = _c.new(thrd._thrd, llhelper(_c.run_fn, callback), arg)
if not h:
raise MemoryError
return h
new._annspecialcase_ = 'specialize:arg(1)'
def switch(thrd, h):
h = _c.switch(thrd._thrd, h)
if not h:
raise MemoryError
return h
def destroy(thrd, h):
_c.destroy(thrd._thrd, h)
if objectmodel.we_are_translated():
debug.debug_print("not using a framework GC: "
"stacklet_destroy() may leak")
is_empty_handle = _c.is_empty_handle
def get_null_handle():
return _c.null_handle
gcrootfinder = StackletGcRootFinder # class object
|
nilq/baby-python
|
python
|
# coding: utf-8
from __future__ import unicode_literals
import re
from django import forms
from django.core.paginator import Paginator, PageNotAnInteger, EmptyPage
from django.utils.encoding import iri_to_uri, smart_text
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
__all__ = ['CategoryChoiceField', 'build_absolute_uri']
absolute_http_url_re = re.compile(r'^https?://', re.I)
class CategoryChoiceField(forms.ModelChoiceField):
def label_from_instance(self, obj):
# pylint: disable=W0212
level = getattr(obj, obj._mptt_meta.level_attr)
indent = max(0, level - 1) * '│'
if obj.parent:
last = ((obj.parent.rght - obj.rght == 1) and
(obj.rght - obj.lft == 1))
if last:
indent += '└ '
else:
indent += '├ '
return '%s%s' % (indent, smart_text(obj))
def build_absolute_uri(location, is_secure=False):
from django.contrib.sites.models import Site
site = Site.objects.get_current()
host = site.domain
if not absolute_http_url_re.match(location):
current_uri = '%s://%s' % ('https' if is_secure else 'http', host)
location = urljoin(current_uri, location)
return iri_to_uri(location)
def get_paginator_items(items, paginate_by, page):
paginator = Paginator(items, paginate_by)
try:
items = paginator.page(page)
except PageNotAnInteger:
items = paginator.page(1)
except EmptyPage:
items = paginator.page(paginator.num_pages)
return items
|
nilq/baby-python
|
python
|
from typing import List
import torch
import numpy as np
# details about math operation in torch can be found in: http://pytorch.org/docs/torch.html#math-operations
# convert numpy to tensor or vise versa
np_data = np.arange(6).reshape((2, 3))  # reshape the flat array of 6 elements into a 2x3 matrix
# numpy.arange([start=0, ]stop, [step=1, ]dtype=None) np.arange(6) ->[0 1 2 3 4 5]
torch_data = torch.from_numpy(np_data)
tensor2array = torch_data.numpy()
print(
'\nnumpy array:', np_data, # [[0 1 2], [3 4 5]]
'\ntorch tensor:', torch_data, # 0 1 2 \n 3 4 5 [torch.LongTensor of size 2x3]
'\ntensor to array:', tensor2array, # [[0 1 2], [3 4 5]]
)
# abs
data: List[int] = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data) # 32-bit floating point
print(
'\nabs',
'\nnumpy: ', np.abs(data), # [1 2 1 2]
'\ntorch: ', torch.abs(tensor) # [1 2 1 2]
)
# sin
print(
'\nsin',
'\nnumpy: ', np.sin(data), # [-0.84147098 -0.90929743 0.84147098 0.90929743]
'\ntorch: ', torch.sin(tensor) # [-0.8415 -0.9093 0.8415 0.9093]
)
# mean
print(
'\nmean',
'\nnumpy: ', np.mean(data), # 0.0
'\ntorch: ', torch.mean(tensor) # 0.0
)
# matrix multiplication
data2 = [[1, 2], [3, 4]]
tensor = torch.FloatTensor(data2) # 32-bit floating point
# correct method
print(
'\nmatrix multiplication (matmul)',
'\nnumpy: ', np.matmul(data2, data2), # [[7, 10], [15, 22]]
'\ntorch: ', torch.mm(tensor, tensor) # [[7, 10], [15, 22]]
)
'''
Element-wise multiplication multiplies corresponding elements, so the two matrices must have
the same shape. Matrix multiplication multiplies the first row of matrix a with the first
column of matrix b, element by element, and sums the products to obtain the first element of
the result; it requires the number of columns of a to equal the number of rows of b, and the
product has as many rows as the left matrix and as many columns as the right matrix.
So the correct statement is:
numpy.matmul() and torch.mm() perform matrix multiplication,
while numpy.multiply() and torch.mul() perform element-wise multiplication.
'''
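# A small runnable check of the distinction described above, reusing the 2x2 matrix defined
# earlier in this script:
print(
    '\nelement-wise vs matrix product',
    '\ntorch.mul: ', torch.mul(tensor, tensor),   # [[1, 4], [9, 16]]
    '\ntorch.mm:  ', torch.mm(tensor, tensor)     # [[7, 10], [15, 22]]
)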
# incorrect method
# data2 = np.array(data2)
# print(
# '\nmatrix multiplication (dot)',
# '\nnumpy: ', data.dot(data2), # [[7, 10], [15, 22]]
# '\ntorch: ', tensor.dot(tensor) # this will convert tensor to [1,2,3,4], you'll get 30.0
# )
|
nilq/baby-python
|
python
|