index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
987,900 | 64525f8c1e9755c950d60e3dec223c83f061e3fd | #!/usr/bin/env python
"""
Copyright (c) 2006-2019 sqlmap developers (http://sqlmap.org/)
See the file 'LICENSE' for copying permission
"""
import httplib
import re
import socket
import urllib
import urllib2
from lib.core.common import getSafeExString
from lib.core.common import getUnicode
from lib.core.common import popValue
from lib.core.common import pushValue
from lib.core.common import readInput
from lib.core.common import urlencode
from lib.core.data import conf
from lib.core.data import kb
from lib.core.data import logger
from lib.core.decorators import stackedmethod
from lib.core.enums import CUSTOM_LOGGING
from lib.core.enums import HTTP_HEADER
from lib.core.enums import REDIRECTION
from lib.core.exception import SqlmapBaseException
from lib.core.exception import SqlmapConnectionException
from lib.core.exception import SqlmapUserQuitException
from lib.core.settings import BING_REGEX
from lib.core.settings import DUMMY_SEARCH_USER_AGENT
from lib.core.settings import DUCKDUCKGO_REGEX
from lib.core.settings import GOOGLE_REGEX
from lib.core.settings import HTTP_ACCEPT_ENCODING_HEADER_VALUE
from lib.core.settings import UNICODE_ENCODING
from lib.request.basic import decodePage
from thirdparty.socks import socks
def _search(dork):
    """
    This method performs the effective search on Google providing
    the google dork and the Google session cookie.

    Falls back to DuckDuckGo or Bing (user's choice) when Google returns
    no usable links. Returns a list of unquoted result URLs, or None on
    unrecoverable page-retrieval errors.
    """
    if not dork:
        return None

    data = None
    headers = {}

    # Reuse the configured User-Agent if present, otherwise a dummy one,
    # so search engines treat us like a regular browser.
    headers[HTTP_HEADER.USER_AGENT] = dict(conf.httpHeaders).get(HTTP_HEADER.USER_AGENT, DUMMY_SEARCH_USER_AGENT)
    headers[HTTP_HEADER.ACCEPT_ENCODING] = HTTP_ACCEPT_ENCODING_HEADER_VALUE

    try:
        # "/ncr" (no country redirect) pins google.com instead of a local TLD
        req = urllib2.Request("https://www.google.com/ncr", headers=headers)
        conn = urllib2.urlopen(req)
    except Exception as ex:
        errMsg = "unable to connect to Google ('%s')" % getSafeExString(ex)
        raise SqlmapConnectionException(errMsg)

    gpage = conf.googlePage if conf.googlePage > 1 else 1

    logger.info("using search result page #%d" % gpage)

    url = "https://www.google.com/search?"
    url += "q=%s&" % urlencode(dork, convall=True)
    url += "num=100&hl=en&complete=0&safe=off&filter=0&btnG=Search"
    url += "&start=%d" % ((gpage - 1) * 100)

    try:
        req = urllib2.Request(url, headers=headers)
        conn = urllib2.urlopen(req)

        requestMsg = "HTTP request:\nGET %s" % url
        requestMsg += " %s" % httplib.HTTPConnection._http_vsn_str
        logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)

        page = conn.read()
        code = conn.code
        status = conn.msg
        responseHeaders = conn.info()

        page = decodePage(page, responseHeaders.get("Content-Encoding"), responseHeaders.get("Content-Type"))

        responseMsg = "HTTP response (%s - %d):\n" % (status, code)

        if conf.verbose <= 4:
            responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING)
        elif conf.verbose > 4:
            responseMsg += "%s\n%s\n" % (responseHeaders, page)

        logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)
    except urllib2.HTTPError as ex:
        # An HTTP error page may still contain parsable results (e.g. CAPTCHA notice)
        try:
            page = ex.read()
        except Exception as _:
            warnMsg = "problem occurred while trying to get "
            warnMsg += "an error page information (%s)" % getSafeExString(_)
            logger.critical(warnMsg)
            return None
    except (urllib2.URLError, httplib.error, socket.error, socket.timeout, socks.ProxyError):
        errMsg = "unable to connect to Google"
        raise SqlmapConnectionException(errMsg)

    retVal = [urllib.unquote(match.group(1) or match.group(2)) for match in re.finditer(GOOGLE_REGEX, page, re.I)]

    if not retVal and "detected unusual traffic" in page:
        warnMsg = "Google has detected 'unusual' traffic from "
        warnMsg += "used IP address disabling further searches"

        if conf.proxyList:
            raise SqlmapBaseException(warnMsg)
        else:
            logger.critical(warnMsg)

    if not retVal:
        message = "no usable links found. What do you want to do?"
        message += "\n[1] (re)try with DuckDuckGo (default)"
        message += "\n[2] (re)try with Bing"
        message += "\n[3] quit"
        choice = readInput(message, default='1')

        if choice == '3':
            raise SqlmapUserQuitException
        elif choice == '2':
            url = "https://www.bing.com/search?q=%s&first=%d" % (urlencode(dork, convall=True), (gpage - 1) * 10 + 1)
            regex = BING_REGEX
        else:
            url = "https://duckduckgo.com/html/"
            data = "q=%s&s=%d" % (urlencode(dork, convall=True), (gpage - 1) * 30)
            regex = DUCKDUCKGO_REGEX

        try:
            # DuckDuckGo is queried via POST (data is not None), Bing via GET
            req = urllib2.Request(url, data=data, headers=headers)
            conn = urllib2.urlopen(req)

            requestMsg = "HTTP request:\nGET %s" % url
            requestMsg += " %s" % httplib.HTTPConnection._http_vsn_str
            logger.log(CUSTOM_LOGGING.TRAFFIC_OUT, requestMsg)

            page = conn.read()
            code = conn.code
            status = conn.msg
            responseHeaders = conn.info()

            page = decodePage(page, responseHeaders.get("Content-Encoding"), responseHeaders.get("Content-Type"))

            responseMsg = "HTTP response (%s - %d):\n" % (status, code)

            if conf.verbose <= 4:
                responseMsg += getUnicode(responseHeaders, UNICODE_ENCODING)
            elif conf.verbose > 4:
                responseMsg += "%s\n%s\n" % (responseHeaders, page)

            logger.log(CUSTOM_LOGGING.TRAFFIC_IN, responseMsg)
        except urllib2.HTTPError as ex:
            try:
                page = ex.read()
                page = decodePage(page, ex.headers.get("Content-Encoding"), ex.headers.get("Content-Type"))
            except socket.timeout:
                warnMsg = "connection timed out while trying "
                warnMsg += "to get error page information (%d)" % ex.code
                logger.critical(warnMsg)
                return None
        except:
            errMsg = "unable to connect"
            raise SqlmapConnectionException(errMsg)

        # FIX: was .replace("&", "&") - a no-op (the HTML entity was lost);
        # result links are HTML-escaped, so "&amp;" must be unescaped to "&"
        retVal = [urllib.unquote(match.group(1).replace("&amp;", "&")) for match in re.finditer(regex, page, re.I | re.S)]

        if not retVal and "issue with the Tor Exit Node you are currently using" in page:
            warnMsg = "DuckDuckGo has detected 'unusual' traffic from "
            warnMsg += "used (Tor) IP address"

            if conf.proxyList:
                raise SqlmapBaseException(warnMsg)
            else:
                logger.critical(warnMsg)

    return retVal
@stackedmethod
def search(dork):
    """Public entry point: run _search(dork), retrying with the next proxy
    from conf.proxyList when the current one is blocked by the engine.

    Temporarily forces redirect-following (REDIRECTION.YES) for the whole
    search, restoring the user's previous choice afterwards.
    """
    # Save the user's redirect preference; search engines redirect heavily.
    pushValue(kb.redirectChoice)
    kb.redirectChoice = REDIRECTION.YES

    try:
        return _search(dork)
    except SqlmapBaseException as ex:
        # A SqlmapBaseException here signals a blocked/blacklisted IP;
        # with a proxy list available we drop the proxy and recurse.
        if conf.proxyList:
            logger.critical(getSafeExString(ex))

            warnMsg = "changing proxy"
            logger.warn(warnMsg)

            conf.proxy = None
            setHTTPHandlers()  # re-initialize handlers so the new proxy takes effect

            return search(dork)
        else:
            raise
    finally:
        # Always restore the redirect preference saved above.
        kb.redirectChoice = popValue()
def setHTTPHandlers():  # Cross-referenced function
    """Stub replaced at runtime by lib.request.connect; never called as-is."""
    raise NotImplementedError
|
987,901 | 68b8cdae76f59fd43d07c7be7607ff2e6bffd94b | import calc
# Menu text printed when the user types "info": every supported equation,
# grouped by textbook chapter, with the numeric shortcut accepted by main().
info = "\
Chapter 27: \n\
\t 1) motion in a magnetic field\n\
\t 2) hall effect\n\
\t 3) magnetic torque\n\
\t 4) magnetic potential energy\n\
Chapter 28: \n\
Chapter 29: \n\
\t 5) displacement current density (two plates)\n\
\t 6) B field from Ampere's Law (two plates)\n\
Chapter 30: \n\
Chapter 31: \n\
\t 7) impedance\n\
\t 8) L-R-C phase angle\n\
\t 9) voltage\n\
\t 10) voltage given amplitude\n\
\t 11) current\n\
\t 12) average power\n\
\t 13) voltage (R)\n\
\t 14) voltage (L)\n\
\t 15) voltage (C)\n\
\t 16) all voltages given max voltage\n\
Chapter 32: \n\
\t 17) electromagnetic wave amplitudes\n\
\t 18) average pressure\n\
\t 19) intensity from power\n\
Chapter 33: \n\
\t 20) law of refraction\n\
\t 21) total internal reflection\n\
Chapter 34: \n\
\t 22) lateral magnification (y)\n\
\t 23) lateral magnification (s)\n\
\t 24) lateral magnification (skip m)\n\
\t 25) lateral magnification for refracting surfaces\n\
\t 26) object and image distances (spherical refracting surface)\n\
\t 27) object and image distances (plane refracting surface)\n\
\t 28) lensmaker\n\
\t 29) focal point\n\
\t 30) focal point concave spherical mirror\n\
\t 31) focal length\n\
Chapter 35: \n\
\t 32) bright fringe location\n\
\t 33) double-slit interference\n\
\t 34) double-slit interference intensity\n\
\t 35) phase angle\n\
Chapter 36: \n\
\t 36) bright fringe location\n\
\t 37) single-slit diffraction\n\
\t 38) single-slit diffraction intensity\n\
Chapter 37:\n\
\t 39) time dilation\n\
\t 40) length contraction\n\
\t 41) simple speed\n\
\t 42) simple speed relative to light\n\
\t 43) gamma\n\
\t 44) lorentz transformation: x\n\
\t 45) lorentz transformation: t\n\
\t 46) lorentz transformation: v\n\
Other: \n\
\t 47) degrees from radians\n\
\t 48) wave basics\n\
"
def main():
    """Interactive REPL: prompt for an equation name (or its numeric
    shortcut), collect its inputs, and print the result from `calc`.

    Special commands: "info" prints the menu, "prev" re-runs the last
    equation, "exit" quits the loop.

    Fix: the Chapter 32 options (17-19) previously used bare `if`
    statements because a dead triple-quoted string broke the elif chain;
    the dead code is now a real comment and the chain is continuous, so
    every option is tested at most once per iteration.
    """
    equation = ""
    prev = ""  # last equation actually run, for the "prev" command
    while(equation != "exit"):
        equation = input("Equation Name: ")
        if equation == "info":
            print(info)
        elif equation == "prev":
            equation = prev
            print("now running", prev, "function again")
        ######## CHAPTER 27 ##########
        if equation == "motion in a magnetic field" or equation == "1":
            print("motion in a magnetic field")
            r = input("R: ")
            m = input("m: ")
            v = input("v: ")
            q = input("q: ")
            b = input("b: ")
            print(calc.motionInMagneticField(r, m, v, q, b))
        elif equation == "hall effect" or equation == "2":
            print("hall effect")
            n = input("n: ")
            q = input("q: ")
            j = input("J: ")
            b = input("B: ")
            e = input("E: ")
            print(calc.hallEffect(n, q, j, b, e))
        elif equation == "magnetic torque" or equation == "3":
            print("magnetic torque")
            t = input("t: ")
            i = input("I: ")
            b = input("B: ")
            a = input("A: ")
            phi = input("phi: ")
            print(calc.magneticTorque(t, i, b, a, phi))
        elif equation == "magnetic potential energy" or equation == "4":
            print("magnetic potential energy")
            u = input("U: ")
            miu = input("miu: ")
            b = input("B: ")
            phi = input("phi: ")
            print(calc.magneticPotentialEnergy(u, miu, b, phi))
        ######## CHAPTER 28 ##########
        ######## CHAPTER 29 ##########
        elif equation == "displacement current density (two plates)" or equation == "5":
            print("displacement current density (two plates)")
            j = input("j: ")
            i = input("i: ")
            r = input("R: ")
            print(calc.dispCurrentDensity(j, i, r))
        elif equation == "B field from Ampere's Law (two plates)" or equation == "6":
            print("B field from Ampere's Law (two plates)")
            r = input("r: ")
            R = input("R: ")
            i = input("i(c): ")
            print(calc.bFromAmpereTwoPlates(r, R, i))
        ######## CHAPTER 30 ##########
        ######## CHAPTER 31 ##########
        elif equation == "impedance" or equation == "7":
            print("impedance")
            omega = input("omega: ")
            l = input("L: ")
            r = input("R: ")
            c = input("C: ")
            print(calc.impedance(r, omega, l, c))
        elif equation == "L-R-C phase angle" or equation == "8":
            print("L-R-C phase angle")
            phi = input("phi (phase angle): ")
            w = input("omega (angular frequency): ")
            l = input("L: ")
            r = input("R: ")
            c = input("C: ")
            print(calc.lrcPhaseAngle(phi, w, l, r, c))
        elif equation == "voltage" or equation == "9":
            print("voltage")
            i = input("I: ")
            l = input("L: ")
            r = input("R: ")
            c = input("C: ")
            t = input("t: ")
            w = input("omega: ")
            phi = input("phi: ")
            print(calc.voltage(i, l, r, c, w, phi, t))
        elif equation == "voltage given amplitude" or equation == "10":
            print("voltage given amplitude")
            v = input("V: ")
            t = input("t: ")
            w = input("omega: ")
            phi = input("phi: ")
            print(calc.voltageGivenAmplitude(v, w, phi, t))
        elif equation == "current" or equation == "11":
            print("current")
            i = input("I (amplitude): ")
            w = input("omega: ")
            t = input("t: ")
            phi = input("phi: ")
            print(calc.current(i, w, t, phi))
        elif equation == "average power" or equation == "12":
            print("average power")
            v = input("V: ")
            i = input("I: ")
            phi = input("phi: ")
            print(calc.power(v, i, phi))
        elif equation == "voltage (R)" or equation == "13":
            print("voltage (R)")
            i = input("I: ")
            r = input("R: ")
            w = input("omega: ")
            t = input("t: ")
            # Resistor voltage is in phase with the current (0 degrees)
            v = float(i) * float(r)
            print(calc.voltageGivenAmplitude(v, w, "0 degrees", t))
        elif equation == "voltage (L)" or equation == "14":
            print("voltage (L)")
            i = input("I: ")
            l = input("L: ")
            w = input("omega: ")
            t = input("t: ")
            # Inductor voltage leads the current by 90 degrees
            v = float(i) * float(w) * float(l)
            print(calc.voltageGivenAmplitude(v, w, "90 degrees", t))
        elif equation == "voltage (C)" or equation == "15":
            print("voltage (C)")
            i = input("I: ")
            c = input("C: ")
            w = input("omega: ")
            t = input("t: ")
            # Capacitor voltage lags the current by 90 degrees
            v = float(i) * (1 / (float(w) * float(c)))
            print(calc.voltageGivenAmplitude(v, w, "-90 degrees", t))
        elif equation == "all voltages given max voltage" or equation == "16":
            print("all voltages given max voltage")
            v = input("V: ")
            i = input("I: ")
            t = input("t: ")
            w = input("omega: ")
            phi = input("phi: ")
            l = input("L: ")
            r = input("R: ")
            c = input("C: ")
            print("v", calc.voltageGivenAmplitude(v, w, phi, t))
            vr = float(i) * float(r)
            print("R", calc.voltageGivenAmplitude(vr, w, "0 degrees", t))
            vl = float(i) * float(w) * float(l)
            print("L", calc.voltageGivenAmplitude(vl, w, "90 degrees", t))
            vc = float(i) * (1 / (float(w) * float(c)))
            print("C", calc.voltageGivenAmplitude(vc, w, "-90 degrees", t))
        ######## CHAPTER 32 ##########
        # elif equation == "electromagnetic wave cross product":
        #     E = input("E (sign direction): ")
        #     B = input("B (sign direction): ")
        #     W = input("Electromagnetic wave (sign direction): ")
        #     print(calc.crossProduct(E,B,W))
        elif equation == "electromagnetic wave amplitudes" or equation == "17":
            print("electromagnetic wave amplitudes")
            B = input("Bmax: ")
            E = input("Emax: ")
            print(calc.electromagneticWaveAmplitudes(B, E))
        elif equation == "average pressure" or equation == "18":
            print("average pressure")
            Emax = input("Emax: ")
            Bmax = input("Bmax: ")
            print(calc.averagePressure(Emax, Bmax))
        elif equation == "intensity from power" or equation == "19":
            print("intensity from power")
            i = input("I: ")
            p = input("p (power): ")
            a = input("A (area): ")
            print(calc.intensityFromPower(i, p, a))
        ######## CHAPTER 33 ##########
        elif equation == "law of refraction" or equation == "20":
            print("law of refraction")
            na = input("na: ")
            nb = input("nb: ")
            theta_a = input("theta (a): ")
            theta_b = input("theta (b): ")
            print(calc.lawOfRefraction(na, nb, theta_a, theta_b))
        elif equation == "total internal reflection" or equation == "21":
            print("total internal reflection")
            na = input("na: ")
            nb = input("nb: ")
            critical = input("theta (critical): ")
            print(calc.totalInternalReflection(na, nb, critical))
        ######## CHAPTER 34 ##########
        elif equation == "lateral magnification (y)" or equation == "22":
            print("lateral magnification (y)")
            m = input("m: ")
            y = input("y: ")
            yprime = input("y': ")
            print(calc.lateralMagnificationY(m, y, yprime))
        elif equation == "lateral magnification (s)" or equation == "23":
            print("lateral magnification (s)")
            m = input("m: ")
            s = input("s: ")
            sprime = input("s': ")
            print(calc.lateralMagnificationS(m, s, sprime))
        elif equation == "lateral magnification (skip m)" or equation == "24":
            print("lateral magnification (skip m)")
            s = input("s: ")
            sprime = input("s': ")
            y = input("y: ")
            yprime = input("y': ")
            print(calc.lateralMagnification(s, sprime, y, yprime))
        elif equation == "lateral magnification for refracting surfaces" or equation == "25":
            print("lateral magnification for refracting surfaces")
            m = input("m: ")
            s = input("s: ")
            sprime = input("s': ")
            na = input("na: ")
            nb = input("nb: ")
            print(calc.lateralMagnificationRefractingSurfaces(m, s, sprime, na, nb))
        elif equation == "lensmaker" or equation == "28":
            print("lensmaker")
            f = input("f: ")
            n = input("n: ")
            r1 = input("R1: ")
            r2 = input("R2: ")
            print(calc.lensmaker(f, n, r1, r2))
        elif equation == "focal point" or equation == "29":
            print("focal point")
            f = input("f: ")
            s = input("s: ")
            sprime = input("s': ")
            print(calc.focalPoint(f, s, sprime))
        elif equation == "focal length" or equation == "31":
            print("focal length")
            r = input("R: ")
            f = input("F: ")
            print(calc.focalLength(r, f))
        elif equation == "focal point concave spherical mirror" or equation == "30":
            print("focal point concave spherical mirror")
            r = input("R: ")
            s = input("s: ")
            sprime = input("s': ")
            print(calc.focalPointCSM(r, s, sprime))
        elif equation == "object and image distances (spherical refracting surface)" or equation == "26":
            print("object and image distances (spherical refracting surface)")
            na = input("na: ")
            nb = input("nb: ")
            s = input("s: ")
            sprime = input("s': ")
            r = input("R: ")
            print(calc.distancesSphericalRefracting(na, nb, s, sprime, r))
        elif equation == "object and image distances (plane refracting surface)" or equation == "27":
            print("object and image distances (plane refracting surface)")
            na = input("na: ")
            nb = input("nb: ")
            s = input("s: ")
            sprime = input("s': ")
            print(calc.distancesPlaneRefracting(na, nb, s, sprime))
        ######## CHAPTER 35 ##########
        # Option 36 (chapter 36) shares the same formula, hence the extra match.
        elif equation == "bright fringe location" or equation == "32" or equation == "36":
            print('bright fringe location')
            y = input("y: ")
            r = input("R: ")
            m = input("m: ")
            wvl = input("wavelength: ")
            d = input("d: ")
            print(calc.doubleSlitInterferenceBrightFringeLocation(y, r, m, wvl, d))
        elif equation == "double-slit interference intensity" or equation == "34":
            print("double-slit interference intensity")
            i = input("I: ")
            i0 = input("I0: ")
            phi = input("phi: ")
            print(calc.doubleSlitInterferenceIntensity(i, i0, phi))
        elif equation == "double-slit interference" or equation == "33":
            print("double-slit interference")
            interferenceType = input("destructive or constructive (c/d): ")
            # constructive: d*sin(theta) = m*lambda; destructive adds a half-order
            if interferenceType == "c":
                offset = 0
            else:
                offset = 0.5
            d = input("d: ")
            theta = input("theta: ")
            wvl = input("wavelength: ")
            m = input("m: ")
            print(calc.doubleSlitInterference(offset, d, theta, wvl, m))
        elif equation == "phase angle" or equation == "35":
            print("phase angle")
            phi = input("phase angle: ")
            wvl = input("wavelength: ")
            diff = input("(r2 - r1): ")
            print(calc.phaseAngle(phi, wvl, diff))
        ######## CHAPTER 36 ##########
        elif equation == "single-slit diffraction" or equation == "37":
            print("single-slit diffraction")
            m = input("m: ")
            wvl = input("wavelength: ")
            a = input("a: ")
            theta = input("theta: ")
            print(calc.singleSlitDiffraction(m, wvl, a, theta))
        elif equation == "single-slit diffraction intensity" or equation == "38":
            print("single-slit diffraction intensity")
            i = input("I: ")
            i0 = input("I0: ")
            a = input("a: ")
            wvl = input("wavelength: ")
            option = input("sin(theta) or theta (s/t): ")
            if option == "s":
                theta = input("sin(theta): ")
            else:
                theta = input("theta: ")
            print(calc.singleSlitDiffractionIntensity(i, i0, a, wvl, theta, option))
        ######## CHAPTER 37 ##########
        elif equation == "time dilation" or equation == "39":
            print("time dilation")
            t = input("t: ")
            t0 = input("t0: ")
            udivc = input("u/c: ")
            print(calc.timeDilation(t, t0, udivc))
        elif equation == "length contraction" or equation == "40":
            print("length contraction")
            l = input("l: ")
            l0 = input("l0: ")
            udivc = input("u/c: ")
            print(calc.lengthContraction(l, l0, udivc))
        elif equation == "gamma" or equation == "43":
            print("gamma")
            udivc = input("u/c: ")
            g = input("gamma: ")
            print(calc.calculateGamma(udivc, g))
        elif equation == "lorentz transformation: x" or equation == "44":
            print("lorentz transformation: x")
            x = input("x: ")
            xprime = input("x': ")
            udivc = input("u/c: ")
            t = input("t: ")
            print(calc.lorentzX(x, xprime, udivc, t))
        elif equation == "lorentz transformation: t" or equation == "45":
            print("lorentz transformation: t")
            t = input("t: ")
            tprime = input("t': ")
            udivc = input("u/c: ")
            x = input("x: ")
            print(calc.lorentzT(t, tprime, udivc, x))
        elif equation == "lorentz transformation: v" or equation == "46":
            print("lorentz transformation: v")
            v = input("v: ")
            vprime = input("v': ")
            udivc = input("u/c: ")
            print(calc.lorentzV(v, vprime, udivc))
        elif equation == "simple speed" or equation == "41":
            print("simple speed")
            v = input("v: ")
            t = input("t: ")
            d = input("d: ")
            print(calc.speedTimeDist(v, t, d))
        elif equation == "simple speed relative to light" or equation == "42":
            print("simple speed relative to light")
            udivc = input("u/c: ")
            t = input("t: ")
            d = input("d: ")
            # Convert the fraction of c into an absolute speed first
            v = float(udivc) * calc.SPEEDOFLIGHT
            print(calc.speedTimeDist(v, t, d))
        ########### OTHER ############
        elif equation == "degrees from radians" or equation == "47":
            print("degrees from radians")
            rad = input("angle (in radians): ")
            print(calc.radiansToDegrees(rad))
        elif equation == "wave basics" or equation == "48":
            print("wave basics")
            v = input("v: ")
            f = input("f: ")
            wvl = input("wavelength: ")
            print(calc.wave(v, f, wvl))
        # Remember what just ran so "prev" can repeat it
        prev = equation
if __name__== "__main__" :
main() |
987,902 | 82bbf6c470e37dc72e57afad9000d81eea5da253 | from django.conf import settings
from django.urls import path,include
from .views import ModelSave
from django.conf.urls.static import static
# Route POST/GET requests for "mldb/" to the ModelSave API view, and serve
# uploaded media files from MEDIA_ROOT (development-only static serving).
urlpatterns = [
    path('mldb/',ModelSave.as_view()),
] + static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
987,903 | b3038166c105364c2f263215197e5747bdf181cf | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright © 2019 Manoel Vilela
#
# @project: Inteligência Computacional UFC 2019.1 - Redes Neurais
# @author: Manoel Vilela
# @email: manoel_vilela@engineer.com
#
"""-- Módulo com algoritmos de teste e métricas de avaliação.
Para classificação:
+ accuracy
Para regressão:
+ r2
Algoritmos de separação de treinamento/teste:
+ hold_out
+ kfold
+ leave_one_out
Ambos algoritmos de recorte devolvem partições de N coleções de X e y
na forma (X, y).
"""
import numpy as np
from processing import concat
def accuracy(y_test, y_pred):
    """Return classification accuracy: the fraction of positions where
    y_test and y_pred agree exactly.

    Both arguments are equal-length sequences of labels.
    """
    # Generator expression instead of a materialized list; the redundant
    # bool() wrapper around the comparison is gone as well.
    corrects = sum(1 for y1, y2 in zip(y_test, y_pred) if y1 == y2)
    return corrects / len(y_test)
def r2(y_test, y_pred):
    """Return the coefficient of determination R^2 for a regression.

    R^2 = 1 - SSE/SST, where SSE is the sum of squared residuals and SST
    the total sum of squares around the mean of y_test. Inputs are numpy
    arrays (elementwise subtraction is used).
    """
    # np.sum instead of the builtin sum over arrays; the unused local
    # n = len(y_test) from the original is removed.
    y_mean = np.mean(y_test)
    sse = np.sum((y_test - y_pred) ** 2)
    sst = np.sum((y_test - y_mean) ** 2)
    return 1 - sse / sst
def hold_out(X, y, test_size=0.30):
    """Hold-out train/test split.

    Shuffles the concatenated dataset once, then cuts it at
    round(test_size * n): the first part becomes the test set, the rest
    the training set. Returns (X_train, X_test, y_train, y_test).
    Note: mutates NumPy's global RNG state via np.random.shuffle.
    """
    shape = y.shape
    n = len(y)
    # c = number of target columns (1 when y is a flat vector)
    c = shape[1] if len(shape) > 1 else 1
    dataset = concat(X, y)
    # shuffle rows in place before splitting
    np.random.shuffle(dataset)
    # last c columns are the targets, everything before them the features
    X_s, y_s = dataset[:, :-c], dataset[:, -c:]
    test_index = round(test_size * n)
    X_train = X_s[test_index:]
    y_train = y_s[test_index:]
    X_test = X_s[:test_index]
    y_test = y_s[:test_index]
    return X_train, X_test, y_train, y_test
def kfold(X, y, k=5):
    """k-fold cross-validation split.

    Shuffles the dataset, partitions it into k equal folds (np.vsplit
    requires len(y) to be divisible by k), and returns a list of k tuples
    (X_train, X_test, y_train, y_test): each fold in turn is the test set
    while the remaining k-1 folds form the training set.
    """
    shape = y.shape
    n = len(y)  # NOTE(review): computed but not used below
    # c = number of target columns (1 when y is a flat vector)
    c = shape[1] if len(shape) > 1 else 1
    dataset = concat(X, y)
    np.random.shuffle(dataset)
    splits = np.vsplit(dataset, k)
    folds = []
    for i in range(k):
        fold_test = splits[i]
        # all folds except i constitute the training data
        train_index = list(range(k))
        train_index.remove(i)
        train_list = []
        for j in train_index:
            train_list.append(splits[j])
        fold_train = np.concatenate(train_list)
        X_train = fold_train[:, :-c]
        y_train = fold_train[:, -c:]
        X_test = fold_test[:, :-c]
        y_test = fold_test[:, -c:]
        fold = (X_train, X_test, y_train, y_test)
        folds.append(fold)
    return folds
def leave_one_out(X, y):
    """Leave-one-out train/test split.

    Equivalent to k-fold with k = n (one row per test set). Returns a
    list of n tuples (X_train, X_test, y_train, y_test), where each test
    set holds exactly one shuffled sample and the training set holds the
    other n-1.
    """
    n = len(y)
    # c/m = number of target/feature columns (1 when the array is flat)
    c = y.shape[1] if len(y.shape) > 1 else 1
    m = X.shape[1] if len(X.shape) > 1 else 1
    dataset = concat(X, y)
    np.random.shuffle(dataset)
    folds = []
    n = len(X)  # NOTE(review): re-binds n; same value as len(y) for aligned data
    for i in range(n):
        dataset_test = dataset[i]
        dataset_train = np.delete(dataset, i, axis=0)
        X_train = dataset_train[:, :-c]
        y_train = dataset_train[:, -c:]
        # the single test row is 1-D; reshape back to (1, m) / (1, c)
        X_test = dataset_test[:-c].reshape((1,m))
        y_test = dataset_test[-c:].reshape((1,c))
        fold = (X_train, X_test, y_train, y_test)
        folds.append(fold)
    return folds
|
987,904 | cd7c7d0f592ed2ee6c042e558236a76ea1b5b156 | #!/usr/bin/env python
from __future__ import division
from __future__ import print_function
from __future__ import absolute_import
import sys
import os
import gzip
# Input: intersected peak calls (gzipped BED with summit info in extra columns)
infile = "merged_universal_neg_intersected.bed.gz"
# Two-column chromosome sizes file (name <TAB> length), as from UCSC
chroms_file = "hg38.chrom.sizes"
# Map chromosome name -> chromosome length; read eagerly at import time,
# so chroms_file must exist in the working directory.
chrom_to_size = dict([(x.rstrip().split("\t")[0],
                       int(x.rstrip().split("\t")[1]))
                      for x in open(chroms_file)])
def _print_best(deets, flank):
    """Print a fixed-width window around the best summit, provided the
    window [summit-flank, summit+flank] stays inside the chromosome."""
    if ((deets[1] > flank) and
        (deets[1] <
         (chrom_to_size[deets[0]]-flank))):
        print(deets[0]+"\t"
              +str(deets[1]-flank)+"\t"
              +str(deets[1]+flank)+"\t"
              +str(deets[2]))


def take_best_peak(options):
    """For each merged region in `infile`, keep the overlapping peak with
    the highest value in the ranking column and print a flank-sized
    window around its summit.

    Fix: options.col_to_rank_by was parsed but ignored (column 9 was
    hard-coded); it is now honoured (default 9 keeps old behaviour).
    """
    flank = int(options.flank)
    rank_col = int(options.col_to_rank_by)
    fh = gzip.open(infile,"rb")
    best_seen_deets = None   # [chrom, summit, peak_height] of best peak so far
    last_region_id = None
    for line in fh:
        line_arr = line.decode("utf-8").rstrip().split("\t")
        chrom = line_arr[0]
        # first three columns identify the merged region this peak overlaps
        region_id = "_".join(line_arr[0:3])
        peak_height = float(line_arr[rank_col])
        # absolute summit = peak start + relative summit offset
        summit = int(line_arr[4])+int(line_arr[12])
        if region_id != last_region_id:
            # entering a new region: flush the best peak of the previous one
            if (best_seen_deets is not None):
                _print_best(best_seen_deets, flank)
            best_seen_deets = [chrom,summit,peak_height]
            last_region_id = region_id
        if (peak_height > best_seen_deets[2]):
            best_seen_deets = [chrom,summit,peak_height]
    # flush the last region (guard against a completely empty input file)
    if best_seen_deets is not None:
        _print_best(best_seen_deets, flank)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    # Column holding the value to rank peaks by (0-indexed into the BED line)
    parser.add_argument("--col_to_rank_by", default=9, help="Zero indexed")
    # Half-width of the printed window around each best summit (required in practice)
    parser.add_argument("--flank")
    options = parser.parse_args()
    take_best_peak(options)
|
987,905 | 68ee7cda139d89d0a2aac024c7908d2aef691661 | import os
from io import StringIO
from django.core.management import BaseCommand
from django.core.management import call_command
import yaml
class Command(BaseCommand):
    """Management command that emits the OpenAPI schema with RACTF metadata."""

    help = "Generate a schema file and add relevant metadata."

    def handle(self, *args, **options):
        """Read the API document from generateschema and reformat it."""
        # Capture generateschema's YAML output in memory instead of a file.
        buffer = StringIO()
        call_command("generateschema", stdout=buffer)
        buffer.seek(0)
        document = yaml.load(buffer, Loader=yaml.FullLoader)

        # Version the schema with the short hash of the current git commit.
        commit = os.popen("git rev-parse HEAD").read().strip()[:8]
        extra_metadata = {
            "externalDocs": {
                "description": "Check us out on GitHub",
                "url": "https://github.com/ractf",
            },
            "info": {
                "title": "RACTF Core",
                "version": commit,
                "description": "The API for RACTF.",
                "contact": {
                    "name": "Support",
                    "email": "support@reallyawesome.atlassian.net",
                    "url": "https://reallyawesome.atlassian.net/servicedesk/customer/portals",
                },
                "x-logo": {
                    "url": "https://www.ractf.co.uk/brand_assets/combined/wordmark_white.svg",
                    "altText": "RACTF Logo",
                },
            }
        }
        document.update(extra_metadata)
        print(yaml.dump(document))
|
987,906 | 5ed7099e6c9256ba7823a71b56e6b674df707ea4 | for i in "python":
if i=="h":
continue #corta el flujo de ejecucion y no hace el print, lo devuelve al for
print(f"viendo la letra {i}")
nombre="Fabricio Vargas"# Voy a contar solo las letras y no los espacios en blanco
contador=0
for i in nombre:
if i==" ":
continue
contador+=1
print(contador)
|
987,907 | b7d489bb7f3c1072e64183ca4be36e654ca7f991 | # Generated by Django 3.0.4 on 2020-04-26 11:16
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import user.models
class Migration(migrations.Migration):
    # Auto-generated by Django (makemigrations); adds a background picture to
    # UserProfile and tightens field definitions. Do not hand-edit operations.

    dependencies = [
        ('user', '0004_auto_20200426_1102'),
    ]

    operations = [
        migrations.AddField(
            model_name='userprofile',
            name='background_picture',
            field=models.ImageField(blank=True, null=True, upload_to=user.models.upload_user_profile_path, verbose_name='background_picture'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='gender',
            field=models.CharField(blank=True, choices=[('M', 'Male'), ('F', 'Female')], max_length=10, null=True, verbose_name='gender'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='profile_picture',
            field=models.ImageField(blank=True, null=True, upload_to=user.models.upload_user_profile_path, verbose_name='profile_picture'),
        ),
        migrations.AlterField(
            model_name='userprofile',
            name='user',
            field=models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, related_name='profile', to=settings.AUTH_USER_MODEL),
        ),
    ]
|
987,908 | c17b4672bf3d3a5930643265e2594cf0785c4251 | import ispisivanje
# Demo of using a local module: show its type, then call its helpers.
print(type(ispisivanje))
ispisivanje.pprint ('Pozdrav, svijete')
ispisivanje.pprint ('Moduli su super')
ispisivanje.pprint ('Python je zaista super')
ispisivanje.print_upper ('Ovo ide u velike slove')
# FIX: "sgrt" does not exist in math (ImportError); the intended name is sqrt.
from math import sqrt as korijen
print(korijen(2))
987,909 | 8cde1ce2f0999bc105107656941101476e2653c2 | #!/usr/bin/env python
import numpy as np
from scipy.optimize import curve_fit
# Measured sample points: X in the experiment's x-units,
# Y scaled by 0.01 to convert the raw readings to the working units.
X = np.array([
    0.00,
    6.05,
    11.87,
    18.01,
    23.99,
    30.01
])
Y = np.array([
    2.36,
    1.85,
    1.60,
    1.45,
    1.19,
    1.09
]) * 0.01
def fit_func(x, a, c):
    """Quadratic model a*x^2 - 72*a*x + c (axis of symmetry fixed at x = 36)."""
    x_squared = x ** 2
    return a * x_squared - 72 * a * x + c
# Dense x-grid for plotting the fitted curve (not used below — kept for later plotting)
fx = np.linspace(0, 40, 100)
# Least-squares fit of fit_func's free parameters (a, c) to the measured data
fparams, fcovariances = curve_fit(fit_func, X, Y)
print(fparams)
987,910 | 278a89b08cca8d50afbfae3741c012ff79d9aaac | import glob
import os
import signal
import open3d as o3d
import numpy as np
import fire
DOWNSAMPLE_VOXEL_SIZE = 2
def find_raytrix_pcd_files(pcd_dir: str):
    """Return the .pcd files in *pcd_dir*, ordered by the numeric frame id
    (the 4th underscore-separated token of the basename)."""
    def frame_number(path):
        # Raytrix filenames carry the frame number at index 3, e.g. a_b_c_<N>_x.pcd
        return int(os.path.basename(path).split('_')[3])

    matches = glob.glob(os.path.join(pcd_dir, '*.pcd'))
    return sorted(matches, key=frame_number)
def rolling_composite_registration(pcds, threshold, voxel_size=DOWNSAMPLE_VOXEL_SIZE, visualize=False):
    """Incrementally ICP-register each cloud against a growing composite.

    Each cloud in `pcds` (after the first) is aligned to the accumulated
    composite with point-to-plane ICP, transformed in place, merged in,
    then the composite is downsampled and its normals re-estimated.
    Returns the list of 4x4 transforms (identity for the first cloud).
    NOTE: mutates the input clouds (transform / normal estimation).
    """
    transforms = [np.eye(4)]
    composite_pcd = pcds[0]
    composite_pcd.estimate_normals()  # point-to-plane ICP needs target normals

    # Initial guess at registration transform (uncomment one)
    # transform_guess = lambda: np.eye(4) # Identity transform
    transform_guess = lambda: transforms[-1] # Last transform

    if visualize:
        vis = o3d.visualization.Visualizer()
        vis.create_window()
        vis.add_geometry(composite_pcd)

    for idx, pcd in enumerate(pcds[1:]):
        registration = o3d.pipelines.registration.registration_icp(
            pcd, composite_pcd,
            threshold,
            transform_guess(),
            o3d.pipelines.registration.TransformationEstimationPointToPlane(),
            o3d.pipelines.registration.ICPConvergenceCriteria(max_iteration=10)
        )
        transform = registration.transformation
        transforms.append(transform)
        if visualize:
            vis.remove_geometry(composite_pcd)
        # Transform point cloud and append to composite
        pcd.transform(transform)
        composite_pcd += pcd
        # Downsample the composite and re-estimate normals
        # composite_pcd, _ = composite_pcd.remove_radius_outlier(5, 20)
        # composite_pcd, _ = composite_pcd.remove_statistical_outlier(20, 2.0)
        composite_pcd = composite_pcd.voxel_down_sample(voxel_size=3*voxel_size)
        composite_pcd.estimate_normals()
        if visualize:
            vis.add_geometry(composite_pcd)
            vis.poll_events()
            vis.update_renderer()

    if visualize:
        # Keep the window interactive until the user closes it
        while vis.poll_events():
            vis.update_renderer()
        vis.destroy_window()

    return transforms
def main(pcd_dir: str):
    """Register every 5th PCD in *pcd_dir* into a rolling composite.

    HACK: puts the process in its own group and SIGKILLs the whole group
    on exit — presumably to force-terminate Open3D visualizer threads
    that otherwise hang; this skips all normal Python cleanup. Verify
    this is still needed before reusing.
    """
    os.setpgrp()
    try:
        o3d.utility.set_verbosity_level(o3d.utility.VerbosityLevel.Debug)
        pcd_files = find_raytrix_pcd_files(pcd_dir)
        # Subsample every 5th frame to keep registration tractable
        pcds = [o3d.io.read_point_cloud(pcd_file) for pcd_file in pcd_files[::5]]
        print(len(pcds))
        transforms = rolling_composite_registration(pcds, 1e4, visualize=True)
    finally:
        os.killpg(0, signal.SIGKILL)
if __name__ == '__main__':
fire.Fire(main)
|
987,911 | eb3f3e108b81bd3ea0fdd013cf80ce10aa40e4fc | import math
K = 15  # system capacity (max customers in the system, M/M/C/K model)
# C = 2
lambdaa = 10  # 10 clients arrive per 1-hour interval => Poisson(lambda = 10)
mu = 6  # average service time: 10 minutes = 1/6 hour => mean = 1/mu => exp(mu = 6)
costo_encola = 10  # hourly cost per customer waiting in queue
costo_cajero = 15  # hourly cost per open teller
def first_sum(C):
    """Sum_{i=0}^{C} lambda^i / (i! * mu^i) — the below-capacity portion of
    the M/M/C/K normalisation constant."""
    total = 0
    for i in range(C + 1):
        total += (lambdaa**i) / (math.factorial(i) * (mu ** i))
    return total
def second_sum(C):
    """Sum_{j=C+1}^{K} (lambda/mu)^j / (C! * C^(j-C)) — the saturated-server
    portion of the M/M/C/K normalisation constant."""
    total = 0
    for j in range(C + 1, K + 1):
        total += ((1 / (math.factorial(C) * (C**(j - C)))) * ((lambdaa / mu) ** j))
    return total
def p0(C):
    """Steady-state probability that the system is empty (normalisation constant)."""
    return (1/(first_sum(C)+second_sum(C)))
def p_n(n, c):
    """Steady-state probability of n customers with c servers.

    NOTE(review): this formula is the n > c branch of the M/M/C/K
    distribution; callers only use it with n >= c+1 (see Lq).
    """
    return p0(c) * ((1 / (math.factorial(c) * (c**(n - c)))) * ((lambdaa / mu) ** n))
def Lq(C, K):
    """Expected queue length: sum over j = C+1..K of (j - C) * P(j customers)."""
    counter = 0
    # Sum from C+1 to K (only states with customers actually waiting)
    for j in range(C + 1, K + 1):
        counter += (j - C) * p_n(j, C)
    return counter
# Sweep the number of tellers C and report queue length and total hourly cost.
# (The original also computed P0 here but never used it; that dead work and
# the duplicate Lq(C, K) call are removed — output is unchanged.)
for C in range(1, K):
    lq = Lq(C, K)
    costo_total = C * costo_cajero + lq * costo_encola
    print(f'C = {C}, Lq = {lq}, Costo total = {costo_total}')
|
987,912 | 5b8c5d30ae7cd3e8888c42defbe380f3f1051e17 | import os
import numpy as np
import matplotlib.pyplot as plt
from thinkdsp import read_wave
# Load the sample WAV — absolute path from the author's machine; adjust before reuse.
wave = read_wave('C:/Users/38407/Desktop/2/数字信号处理/python/5python练习第三章/72475__rockwehrmann__glissup02.wav')
plt.rcParams['font.sans-serif']=['SimHei']  # font with CJK glyphs for the axis labels
plt.rcParams['axes.unicode_minus']=False  # render minus signs correctly with that font
# Spectrogram with 512-sample segments, displayed up to 5 kHz
wave.make_spectrogram(512).plot(high=5000)
plt.ylabel('频率(HZ)')
plt.xlabel('时间(s)')
wave.write(filename='output3-4.wav')
plt.show()
|
987,913 | fd81ef81b3e8f7636279cbb1eda37fe6c195ad46 | # This program uses a thermal printer to print out various information from
# the internet or the "fortune" program
from Adafruit_MCP230xx import *
import RPi.GPIO as GPIO
import time, subprocess, re, textwrap, urllib, urllib2, os, Image, ImageDraw, unicodedata, datetime
from xml.dom.minidom import parseString
from bs4 import BeautifulSoup, NavigableString
from threading import Thread
# Load the thermal-printer driver and open it on the Pi's hardware UART.
printerLibrary = __import__('printer')
p = printerLibrary.ThermalPrinter(serialport="/dev/ttyAMA0")
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
# Front-panel button GPIO numbers (BCM numbering).
BTN_0 = 18
BTN_1 = 23
BTN_2 = 24
BTN_3 = 25
BTN_4 = 17
BTN_5 = 27
# Enable the pullup resistors on the buttons
GPIO.setup(BTN_0, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_1, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_2, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_3, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_4, GPIO.IN, pull_up_down=GPIO.PUD_UP)
GPIO.setup(BTN_5, GPIO.IN, pull_up_down=GPIO.PUD_UP)
# Use busnum = 1 for new Raspberry Pi's (512MB with mounting holes)
mcp = Adafruit_MCP230XX(busnum = 1, address = 0x20, num_gpios = 16)
# Set pins 0..5 of the MCP23017 expander to output (they drive the six status LEDs).
mcp.config(0, mcp.OUTPUT)
mcp.config(1, mcp.OUTPUT)
mcp.config(2, mcp.OUTPUT)
mcp.config(3, mcp.OUTPUT)
mcp.config(4, mcp.OUTPUT)
mcp.config(5, mcp.OUTPUT)
def print_weather(zipcode):
    """Fetch the Yahoo Weather RSS for *zipcode* and print the current
    conditions plus today's and tomorrow's forecast to the thermal printer
    (and to stdout). Prints a notice and does nothing for an empty zipcode."""
    if zipcode!="":
        file = urllib2.urlopen('http://weather.yahooapis.com/forecastrss?p='+zipcode)
        data = file.read()
        file.close()
        dom = parseString(data)
        conditionTag = dom.getElementsByTagName('yweather:condition')
        # Fetch Yahoo's condition icon and convert GIF -> PNG via PIL
        # (the bitmap print below is currently disabled).
        currentImageCode = conditionTag[0].attributes['code'].value
        imageFilename = "weather_imgs/"+currentImageCode+".gif"
        urllib.urlretrieve("http://l.yimg.com/a/i/us/we/52/"+currentImageCode+".gif", imageFilename)
        im = Image.open(imageFilename)
        transparency = im.info['transparency']
        os.remove(imageFilename)
        imageFilename = imageFilename.replace('.gif', '.png')
        im.save(imageFilename, transparency=transparency)
        data = list(im.getdata())
        w, h = im.size
        #p.print_bitmap(data, w, h)
        # Current conditions; chr(0xF8) is presumably the degree sign in the
        # printer's character set -- TODO confirm against the printer font.
        currentText = conditionTag[0].attributes['text'].value
        currentTemp = conditionTag[0].attributes['temp'].value
        print "Now: " + currentText+" "+currentTemp+" F\n"
        p.inverse_on()
        p.bold_on()
        p.print_text("Now:")
        p.inverse_off()
        p.print_text(" "+currentText+" "+currentTemp)
        p.print_text(chr(0xF8))
        p.print_text("F\n")
        p.bold_off()
        # Two-day forecast: entry 0 = today, entry 1 = tomorrow.
        forecastTag = dom.getElementsByTagName('yweather:forecast')
        todayDay = forecastTag[0].attributes['day'].value
        todayText = forecastTag[0].attributes['text'].value
        todayHigh = forecastTag[0].attributes['high'].value
        todayLow = forecastTag[0].attributes['low'].value
        print todayDay + ": " + todayText
        print "High: " + todayHigh + " F Low: " + todayLow + " F"
        p.inverse_on()
        p.bold_on()
        p.print_text(todayDay + ":")
        p.inverse_off()
        p.print_text(" "+ todayText+"\n")
        p.print_text(" High: " + todayHigh)
        p.print_text(chr(0xF8))
        p.print_text("F Low: " + todayLow)
        p.print_text(chr(0xF8))
        p.print_text("F\n")
        tomorrowDay = forecastTag[1].attributes['day'].value
        tomorrowText = forecastTag[1].attributes['text'].value
        tomorrowHigh = forecastTag[1].attributes['high'].value
        tomorrowLow = forecastTag[1].attributes['low'].value
        print tomorrowDay + ": " + tomorrowText
        print "High: " + tomorrowHigh + " F Low: " + tomorrowLow + " F"
        p.inverse_on()
        p.bold_on()
        p.print_text(tomorrowDay + ":")
        p.inverse_off()
        p.print_text(" "+ tomorrowText+"\n")
        p.print_text(" High: " + tomorrowHigh)
        p.print_text(chr(0xF8))
        p.print_text("F Low: " + tomorrowLow)
        p.print_text(chr(0xF8))
        p.print_text("F\n")
        p.linefeed()
        p.linefeed()
        p.linefeed()
        time.sleep(2)
    else:
        print "No zip code entered"
def insert(original, new, pos):
    """Return *original* with *new* spliced in at index *pos*."""
    prefix, suffix = original[:pos], original[pos:]
    return prefix + new + suffix
def print_word_of_day():
    """Scrape Merriam-Webster's word-of-the-day feed and print the headword
    (inverted), the pronunciation (underlined) and the definition, trimmed
    shortly after the 'Examples:' section."""
    url = "http://www.merriam-webster.com/word/index.xml"
    response = urllib2.urlopen(urllib2.Request(url))
    the_page = response.read()
    dom = parseString(the_page)
    #retrieve the first xml tag (<tag>data</tag>) that the parser finds with name tagName:
    summaryTag = dom.getElementsByTagName('itunes:summary')[1].toxml()
    #strip off the tag (<tag>data</tag> ---> data):
    # NOTE(review): the .replace('"','"') in this chain is a no-op as written;
    # it presumably meant to unescape &quot; -- confirm against the feed.
    summaryData=summaryTag.replace('<itunes:summary>','').replace('</itunes:summary>','').replace('"','"').replace('\n\n','\n').replace('\n','',1).replace("Merriam-Webster's Word of the Day", "Word of the Day")
    # Keep only up to the line following 'Examples:'.
    summaryData = summaryData[:summaryData.index('\n', (summaryData.index('Examples:')+15))]
    # Fold accented characters to ASCII for the printer.
    formattedData = unicodedata.normalize('NFKD', summaryData).encode('ascii','ignore')
    print formattedData
    # Headword = text before the first ':', printed inverted + bold.
    p.inverse_on()
    p.bold_on()
    p.print_text(word_wrap(formattedData[0:formattedData.index(":")], 32))
    p.inverse_off()
    p.bold_off()
    restofText = formattedData[formattedData.index(":"):]
    # Break the line at the first backslash (start of the pronunciation).
    restofFormatted = word_wrap(insert(restofText,"\n",restofText.index('\\')), 32)
    p.bold_on()
    p.underline_on()
    p.print_text(restofFormatted[:restofFormatted.index('\\')])
    p.bold_off()
    p.underline_off()
    p.print_text(restofFormatted[restofFormatted.index('\\'):])
    p.linefeed()
    p.linefeed()
    p.linefeed()
    p.linefeed()
def print_verse_of_day():
    """Fetch the 'verse of the day' RSS feed and print the verse reference
    (underlined) followed by the verse text."""
    file = urllib2.urlopen('http://feeds.feedburner.com/hl-devos-votd?format=xml')
    data = file.read()
    file.close()
    dom = parseString(data)
    # Title looks like "... - <reference>": keep only the reference part.
    titleTag = dom.getElementsByTagName('title')[1].toxml()
    titleTag = titleTag.replace('<title>', '').replace('</title>','')
    titleTag = titleTag[titleTag.find('- ')+2:]
    print titleTag
    descTag = dom.getElementsByTagName('description')[1].toxml()
    descTag = descTag.replace('<description>', '')
    # Truncate at the first entity/ampersand.
    descTag = descTag[0:descTag.find('&')]
    # NOTE(review): this replace is a no-op as written; it presumably meant
    # to unescape &quot; -- confirm against the feed contents.
    descTag = descTag.replace('"', '"')
    print descTag
    p.inverse_on()
    p.bold_on()
    p.print_text('Verse of the Day:\n')
    p.inverse_off()
    p.underline_on()
    p.print_text(titleTag)
    p.bold_off()
    p.underline_off()
    p.linefeed()
    p.print_text(word_wrap(descTag, 32))
    p.linefeed()
    p.linefeed()
    p.linefeed()
    p.linefeed()
def print_today_in_history():
    """Scrape factmonster.com's 'day in history' page and print each headline
    (inverted) with its summary paragraph."""
    file = urllib2.urlopen('http://www.factmonster.com/dayinhistory')
    html_doc = file.read()
    file.close()
    # Trim the document to the main content area before parsing.
    html_doc = html_doc[html_doc.find('<td class="bodybg"'):html_doc.find('<div class="feeds"')]
    soup = BeautifulSoup(html_doc)
    count = 0
    titles = soup.find_all('h3')
    events = soup.find_all('p', recursive=True)
    p.underline_on()
    now = datetime.datetime.now()
    p.print_text('Today in History: '+ now.strftime('%B %d') + '\n')
    print 'Today in History: ' + now.strftime('%B %d')
    p.underline_off()
    # Assumes titles[i] pairs with events[i] -- TODO confirm page structure.
    for title in titles:
        title = ''.join(title)
        print title
        print strip_tags(str(events[count]))
        p.inverse_on()
        p.bold_on()
        p.print_text(title+'\n')
        p.inverse_off()
        p.bold_off()
        # Map curly quotes to ASCII equivalents; drop any other non-ASCII.
        punctuation = { 0x2018:0x27, 0x2019:0x27, 0x201C:0x22, 0x201D:0x22 }
        eventText = unicode(events[count]).translate(punctuation).encode('ascii', 'ignore')
        p.print_text(word_wrap(str(strip_tags(eventText)), 32))
        p.bold_off()
        p.linefeed()
        count += 1
    p.linefeed()
    p.linefeed()
    p.linefeed()
def strip_tags(html):
    """Remove formatting/link/wrapper tags from an HTML fragment while keeping
    their children, and return the BeautifulSoup tree (str() it for text)."""
    soup = BeautifulSoup(html)
    invalid_tags = ['b', 'i', 'u', 'a', 'html', 'body', 'p']
    for tag in invalid_tags:
        for match in soup.findAll(tag):
            match.replaceWithChildren()
    return soup
def word_wrap(string, width=80, ind1=0, ind2=0, prefix=''):
    """ word wrapping function.
        string: the string to wrap
        width: the column number to wrap at
        prefix: prefix each line with this string (goes before any indentation)
        ind1: number of characters to indent the first line
        ind2: number of characters to indent the rest of the lines
    """
    string = prefix + ind1 * " " + string
    newstring = ""
    while len(string) > width:
        # find position of nearest whitespace char to the left of "width"
        marker = width - 1
        while marker >= 0 and not string[marker].isspace():
            marker = marker - 1
        if marker < 0:
            # No whitespace in the first `width` characters (one long word).
            # The old code let `marker` go negative, wrapping around via
            # Python's negative indexing and eventually raising IndexError;
            # hard-break at the width instead.
            newstring = newstring + string[0:width] + "\n"
            string = prefix + ind2 * " " + string[width:]
        else:
            # remove line from original string and add it to the new string
            newline = string[0:marker] + "\n"
            newstring = newstring + newline
            string = prefix + ind2 * " " + string[marker + 1:]
    return newstring + string
# LED-chase state: one of the six panel LEDs is lit at a time while idle.
currentLED = 0
NUM_LEDS = 5
lastLED = NUM_LEDS
# Start with every LED off.
for x in range(0, NUM_LEDS+1):
    mcp.output(x, 0)
# Flags polled by the blink worker threads below.
shouldBlink = False
shouldBlink2 = False
def blinkLED(led):
    """Blink the given expander LED (0.3s on / 0.3s off) until the global
    shouldBlink flag is cleared. Runs on a worker thread during a print job."""
    global shouldBlink
    while shouldBlink==True:
        mcp.output(led, 1)
        time.sleep(.3)
        mcp.output(led, 0)
        time.sleep(.3)
def blinkLED2(led):
    """Slow blink (0.7s on / 0.7s off) until shouldBlink2 is cleared.
    Used while the bottom-button (shutdown/fortune) menu is active."""
    global shouldBlink2
    while shouldBlink2==True:
        mcp.output(led, 1)
        time.sleep(.7)
        mcp.output(led, 0)
        time.sleep(.7)
def bottom_btn_menu():
    """Secondary menu entered by holding BTN_5: holding BTN_5 again shuts the
    Pi down; BTN_0..BTN_4 print a "fortune" from different categories.
    Blocks until the menu is exited with a short BTN_5 press."""
    global shouldBlink
    global shouldBlink2
    while True:
        if GPIO.input(BTN_5) == False:
            time.sleep(1)
            if GPIO.input(BTN_5) == False:
                # Still held after 1s: power the Pi off.
                shouldBlink2 = False
                time.sleep(3)
                subprocess.call(['sudo', 'shutdown', '-h', 'now'])
                raise KeyboardInterrupt
            else:
                # Short press: leave the menu.
                shouldBlink2 = False
                time.sleep(.5)
                return
        elif GPIO.input(BTN_0) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (0, ))
            thread.start()
            text = subprocess.check_output(["/usr/games/fortune", "-s", "science"])
            #text = text.replace('A:', '\nA:')
            #text = text.replace('--', '\n\n--')
            # Collapse all whitespace, then wrap to the printer's 32 columns.
            text = ' '.join(text.split())
            text_formatted = word_wrap(text, 32)
            print text_formatted
            p.print_text(text_formatted)
            p.linefeed()
            p.linefeed()
            p.linefeed()
            p.linefeed()
            time.sleep(3)
            shouldBlink = False
        elif GPIO.input(BTN_1) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (1, ))
            thread.start()
            text = subprocess.check_output(["/usr/games/fortune", "-s", "humorists"])
            # Put Q:/A: style fortunes on separate lines.
            text = text.replace('A:', '\nA:')
            #text = text.replace('--', '\n\n--')
            text = ' '.join(text.split())
            text_formatted = word_wrap(text, 32)
            print text_formatted
            p.print_text(text_formatted)
            p.linefeed()
            p.linefeed()
            p.linefeed()
            p.linefeed()
            time.sleep(3)
            shouldBlink = False
        elif GPIO.input(BTN_2) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (2, ))
            thread.start()
            text = subprocess.check_output(["/usr/games/fortune", "-s", "computers"])
            text = text.replace('A:', '\nA:')
            #text = text.replace('--', '\n\n--')
            text = ' '.join(text.split())
            text_formatted = word_wrap(text, 32)
            print text_formatted
            p.print_text(text_formatted)
            p.linefeed()
            p.linefeed()
            p.linefeed()
            p.linefeed()
            time.sleep(3)
            shouldBlink = False
        elif GPIO.input(BTN_3) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (3, ))
            thread.start()
            text = subprocess.check_output(["/usr/games/fortune", "news"])
            text = text.replace('A:', '\nA:')
            #text = text.replace('--', '\n\n--')
            text = ' '.join(text.split())
            text_formatted = word_wrap(text, 32)
            print text_formatted
            p.print_text(text_formatted)
            p.linefeed()
            p.linefeed()
            p.linefeed()
            p.linefeed()
            time.sleep(3)
            shouldBlink = False
        elif GPIO.input(BTN_4) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (4, ))
            thread.start()
            text = subprocess.check_output(["/usr/games/fortune", "politics"])
            text = text.replace('A:', '\nA:')
            #text = text.replace('--', '\n\n--')
            text = ' '.join(text.split())
            text_formatted = word_wrap(text, 32)
            print text_formatted
            p.print_text(text_formatted)
            p.linefeed()
            p.linefeed()
            p.linefeed()
            p.linefeed()
            time.sleep(3)
            shouldBlink = False
# Main UI loop: each button triggers a different printout; when no button is
# pressed, advance the idle LED chase once per second. KeyboardInterrupt
# (Ctrl-C or the shutdown menu) lights all LEDs and exits.
start = time.time()
while (True):
    try:
        if GPIO.input(BTN_0) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (0, ))
            thread.start()
            print_weather('11530')
            time.sleep(3)
            shouldBlink = False
            time.sleep(.5)
        elif GPIO.input(BTN_1) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (1, ))
            thread.start()
            print_word_of_day()
            time.sleep(8)
            shouldBlink = False
            time.sleep(.5)
        elif GPIO.input(BTN_2) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (2, ))
            thread.start()
            print_verse_of_day()
            time.sleep(6)
            shouldBlink = False
            time.sleep(.5)
        elif GPIO.input(BTN_3) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (3, ))
            thread.start()
            subprocess.call(['python', '/home/webide/repositories/my-pi-projects/printer_of_knowledge/sudoku-gfx.py'])
            time.sleep(1)
            # NOTE(review): p2 is never used afterwards -- presumably reopens
            # the serial port after the sudoku script used it; confirm.
            p2 = printerLibrary.ThermalPrinter(serialport="/dev/ttyAMA0")
            shouldBlink = False
            time.sleep(.5)
        elif GPIO.input(BTN_4) == False:
            mcp.output(lastLED, 0)
            shouldBlink = True
            thread = Thread(target = blinkLED, args = (4, ))
            thread.start()
            print_today_in_history()
            time.sleep(18)
            shouldBlink = False
            time.sleep(.5)
        elif GPIO.input(BTN_5) == False:
            mcp.output(lastLED, 0)
            shouldBlink2 = True
            thread = Thread(target = blinkLED2, args = (5, ))
            thread.start()
            time.sleep(2)
            bottom_btn_menu()
        else:
            # Idle: step the LED chase once a second.
            if time.time() - start > 1:
                start = time.time()
                mcp.output(currentLED, 1)
                mcp.output(lastLED, 0)
                lastLED = currentLED
                currentLED += 1
                if currentLED==6:
                    currentLED = 0
    except KeyboardInterrupt:
        # Light every LED to signal shutdown/exit.
        for x in range(0, NUM_LEDS+1):
            mcp.output(x, 1)
        exit()
987,914 | 589beb097abc15a7ac2207ba99a045c67330251f | #!/usr/bin/env python
# encoding: utf-8
import re
import json
import urllib
from datetime import datetime
from client.csrfopner import CSRFOpenerDirector
from bs4 import BeautifulSoup
# Site endpoints; authentication-related pages go over HTTPS.
HTTP = 'http://'
HTTPS = 'https://'
HOST = 'www.xuetangx.com'
BASE_URL_S = HTTPS + HOST
BASE_URL = HTTP + HOST
LOGIN_PAGE = BASE_URL_S + '/login'
LOGIN_URL = BASE_URL_S + '/login_ajax'
DASHBOARD = BASE_URL_S + '/dashboard'
SEARCH = BASE_URL_S + '/courses/search'
COURSES = BASE_URL + '/courses'
ENROLLMENT = BASE_URL_S + '/change_enrollment'
_COURSEWARE = '/courseware'  # path suffix appended to /courses/<id>
_VIDEO2SRC = BASE_URL_S + '/videoid2source/'  # video id -> downloadable sources
def full_url(path):
    """Resolve a (possibly relative) path against the site's HTTPS base URL."""
    import urlparse  # Python 2 module (urllib.parse in Python 3)
    return urlparse.urljoin(BASE_URL_S, path)
class AuthenticationError(Exception):
    """Raised when the login endpoint rejects the supplied credentials."""
def __get_opener__(email=None, password=None):
    """
    email: str
    password: str
    => CSRFOpenerDirector

    With no credentials, returns an anonymous opener; otherwise logs in via
    the AJAX endpoint and raises AuthenticationError on failure.
    """
    # Hit the login page first so the opener picks up the CSRF cookie.
    opener = CSRFOpenerDirector()
    opener.open(LOGIN_PAGE)
    if email is None or password is None:
        return opener
    postdata = urllib.urlencode({
        'email': email,
        'password': password}).encode('utf-8')
    resp = opener.open(LOGIN_URL, postdata).read()
    success = json.loads(resp)['success']
    if not success:
        raise AuthenticationError()
    return opener
def __get_page__(url, email=None, password=None, data=None):
    """Fetch *url* (logged in when credentials are given) and return the raw
    response body; *data* makes it a POST."""
    return __get_opener__(email, password).open(url, data=data).read()
def verify(email, password):
    """
    email: str
    password: str
    => bool. May raise Exception.
    """
    # NOTE(review): __get_opener__ raises AuthenticationError on bad
    # credentials, so this returns True or raises -- it never returns False.
    opener = __get_opener__(email, password)
    return (True if opener else False)
def student_info(email, password):
    """
    email: str
    password: str
    => (name, nickname)

    Scrapes the logged-in dashboard for the user's real name and nickname.
    """
    page = __get_page__(DASHBOARD, email, password)
    # BeautifulSoup is imported at module level; the redundant function-local
    # re-import that used to live here has been removed.
    page = BeautifulSoup(page)
    name = page.body.find('span', attrs={'class': 'data'}).text.strip()
    nickname = page.body.find('h1', attrs={'class': 'user-name'}).text.strip()
    return (name, nickname)
def __upcoming__(course):
    """Parse an upcoming-course dashboard card into a plain dict.

    Upcoming cards have no enter link, so the "<id> <title>" text lives in a
    <span> rather than an <a>.
    """
    date_block = course.find('p', attrs={'class': 'date-block'}).text.strip().split()
    # Last token of the date block is the start date, e.g. "2015-03-01".
    start_date = datetime.strptime(date_block[-1], '%Y-%m-%d')
    university = course.find('h2', attrs={'class': 'university'}).text.strip()
    id_title = course.find('section', attrs={'class': 'info'}).find('h3').find('span').text.strip().split()
    course_id = id_title[0]
    title = id_title[1]
    img_url = full_url(course.find('img').attrs['src'])
    return {
        'university': university,
        'id': course_id,
        'title': title,
        'start_date': {
            'year': start_date.year,
            'month': start_date.month,
            'day': start_date.day
        },
        'img_url': img_url,
    }
def __current__(course):
    """Parse a currently-running course dashboard card into a plain dict.

    Unlike __upcoming__, running cards carry an enter-course link, exposed
    here as 'course_info_url'.
    """
    date_block = course.find('p', attrs={'class': 'date-block'}).text.strip().split()
    # Last token of the date block is the start date, e.g. "2015-03-01".
    start_date = datetime.strptime(date_block[-1], '%Y-%m-%d')
    university = course.find('h2', attrs={'class': 'university'}).text.strip()
    id_title = course.find('section', attrs={'class': 'info'}).find('h3').find('a').text.strip().split()
    course_id = id_title[0]
    title = id_title[1]
    img_url = full_url(course.find('img').attrs['src'])
    course_info_url = full_url(course.find('a', attrs={'class': 'enter-course'}).attrs['href'])
    return {
        'university': university,
        'id': course_id,
        'title': title,
        'start_date': {
            'year': start_date.year,
            'month': start_date.month,
            'day': start_date.day
        },
        'img_url': img_url,
        'course_info_url': course_info_url,
    }
def __past__(course):
    """Parse a finished-course dashboard card into a plain dict.

    The markup is identical to a running-course card, so delegate to
    __current__ instead of keeping a byte-for-byte duplicate of its parser.
    """
    return __current__(course)
def courses_selected(email, password):
    """
    email: str
    password: str
    => (courses_upcoming, courses_current, courses_past)
    """
    upcoming = []
    current = []
    past = []
    page = __get_page__(DASHBOARD, email, password)
    page = BeautifulSoup(page)
    for course in page.findAll('article', attrs={'class': 'my-course'}):
        date_block = course.find('p', attrs={'class': 'date-block'}).text.strip().split()
        # First token of the date block identifies the card type:
        # "course starts" (upcoming) / "course has started" (running) /
        # "course completion" (finished).
        if date_block[0] == u'课程开始':
            upcoming.append(__upcoming__(course))
        elif date_block[0] == u'课程已开始':
            current.append(__current__(course))
        elif date_block[0] == u'课程完成度':
            past.append(__past__(course))
    return (upcoming, current, past)
def courses_upcoming(email, password):
    """
    email: str
    password: str
    => list(course*)
    """
    # Same dashboard scrape as courses_selected (one HTTP fetch either way);
    # reuse it instead of duplicating the parsing loop.
    return courses_selected(email, password)[0]
def courses_current(email, password):
    """
    email: str
    password: str
    => list(course*)
    """
    # Same dashboard scrape as courses_selected (one HTTP fetch either way);
    # reuse it instead of duplicating the parsing loop.
    return courses_selected(email, password)[1]
def courses_past(email, password):
    """
    email: str
    password: str
    => list(course*)
    """
    # Same dashboard scrape as courses_selected (one HTTP fetch either way);
    # reuse it instead of duplicating the parsing loop.
    return courses_selected(email, password)[2]
def courses_categories():
    """Scrape the course-list category sidebar.

    => list of {'id', 'title', 'count'} dicts, one per category link.
    """
    page = __get_page__(COURSES)
    page = BeautifulSoup(page)
    categories = []
    for item in page.find('div', attrs={'class': 'xkfl'}).findAll('a'):
        cid = item.attrs['data-id']
        # Category links look like "title ( 12 )": split name and count.
        pattern = re.compile(u'([^\(]+)\(\s*(\d+)\s*\)', re.UNICODE)
        m_title = pattern.search(item.text.strip())
        title = m_title.group(1)
        count = int(m_title.group(2))
        categories.append({
            'id': cid,
            'title': title,
            'count': count,
        })
    return categories
def __bool2_str__(b):
return ('true' if b else 'false')
def courses_search(query=None, cid=None, started=False, hasTA=False, offset=0, limit=10000000):
    """Search the course catalogue.

    query/cid filter by keyword and category id; started/hasTA are sent as
    the strings 'true'/'false'. Returns (results, next_offset), where
    next_offset is -1 when there are no further pages.
    """
    query_dict = {
        'offset': offset,
        'limit': limit,
    }
    if query is not None:
        query_dict['query'] = query.encode('utf-8')
    if cid is not None:
        query_dict['cid'] = cid
    query_dict['started'] = __bool2_str__(started)
    query_dict['hasTA'] = __bool2_str__(hasTA)
    postdata = urllib.urlencode(query_dict).encode('utf-8')
    page = __get_page__(SEARCH, data=postdata)
    page = json.loads(page)
    next_offset = int(page['next_parameters'].get('offset', '-1'))
    result = []
    for course in page['data']:
        owner = course['owner']
        university = course['org']
        course_id = course['course_num']
        title = course['name']
        img_url = full_url(course['thumbnail'])
        course_about_url = full_url(course['href'])
        teacher_name = course.get('staff_name', '')
        teacher_title = course.get('staff_title', '')
        update_info = course['modified']  # "updated N days ago" display string, str
        serialized_no = course['serialized']  # serialised up to lecture N; int, default -1
        hasTA = course['hasTA']  # bool
        subtitle = course['subtitle']  # short course description
        result.append({
            'owner': owner,
            'university': university,
            'id': course_id,
            'title': title,
            'img_url': img_url,
            'course_about_url': course_about_url,
            'teacher': {
                'name': teacher_name,
                'title': teacher_title,
            },
            'update_info': update_info,
            'serialized_no': serialized_no,
            'hasTA': hasTA,
            'subtitle': subtitle,
        })
    return result, next_offset
def __extract_course_id__(url):
pattern = re.compile('/courses/(.+)/[(about)(info)]')
m_id = pattern.search(url)
return m_id.group(1)
def courses_enrollment(email, password, url, action):
    """POST an enrollment change for the course at *url*.

    action: passed straight through as 'enrollment_action' (presumably
    'enroll'/'unenroll' -- confirm against the site API).
    => True when the server answers HTTP 200.
    """
    course_id = __extract_course_id__(url)
    postdata = {
        'course_id': course_id,
        'enrollment_action': action,
    }
    postdata = urllib.urlencode(postdata).encode('utf-8')
    opener = __get_opener__(email, password)
    conn = opener.open(ENROLLMENT, data=postdata)
    return conn.code == 200
def __courseware_url__(about_or_info_url):
    """Translate a course about/info URL into its courseware URL."""
    course_id = __extract_course_id__(about_or_info_url)
    return ''.join([BASE_URL, '/courses/', course_id, _COURSEWARE])
def courses_lectures(email, password, url):
    """Chapter/lecture outline for a course, without fetching each lecture's
    item list (much cheaper than courses_ware)."""
    url = __courseware_url__(url)
    opener = __get_opener__(email, password)
    return __ware__(opener, url, need_items=False)
def courses_lecture(email, password, url):
    """Item list (videos/problems) for a single lecture page at *url*."""
    opener = __get_opener__(email, password)
    return __items__(opener, url)
def __items__(opener, lecture_url):
    """List the items (videos/problems) on one lecture page.

    Video ids are scraped from the raw HTML <source> tags first, then matched
    positionally with the entries of the sequence list.
    """
    raw_page = opener.open(lecture_url).read()
    ptn_video = '<source type="video/mp4" src="([^&#;]+)"/>'
    video_ids = re.findall(ptn_video, raw_page)
    video_ids_idx = 0
    page = BeautifulSoup(raw_page)
    items = []
    for item in page.find('ol', attrs={'id': 'sequence-list'}).findAll('li'):
        item_class = item.find('a').attrs['class']
        item_title = item.find('a').find('p').text.strip()
        if 'seq_video' in item_class:
            item_type = 'video'
            # Resolve the scraped video id to its downloadable sources;
            # 'quality20'/'quality10' are the high/low quality source lists.
            get_item_url = _VIDEO2SRC + video_ids[video_ids_idx]
            video_ids_idx += 1
            item_urls_json = json.loads(opener.open(get_item_url).read())['sources']
            item_url = {}
            item_url['high-quality'] = []
            for src in item_urls_json['quality20']:
                item_url['high-quality'].append(src)
            item_url['low-quality'] = []
            for src in item_urls_json['quality10']:
                item_url['low-quality'].append(src)
        elif 'seq_problem' in item_class or 'seq_other' in item_class:
            item_type = 'problem'
            item_url = lecture_url
        else:
            # Unknown sequence item type: fail loudly rather than guess.
            raise AttributeError('Lecture item not consistent: %s, %s' % (item_class, lecture_url))
        items.append({
            'item_title': item_title,
            'item_type': item_type,
            'item_url': item_url,
        })
    return items
def __ware__(opener, url, need_items=True):
    """Scrape a courseware page into chapters -> lectures (-> items).

    need_items=False skips fetching each individual lecture page.
    """
    page = opener.open(url).read()
    page = BeautifulSoup(page)
    chapters = []
    for chapter in page.findAll('div', attrs={'class': 'chapter'}):
        ch_title = chapter.find('h3').text.strip()
        lectures = []
        for lecture in chapter.findAll('li'):
            le_title = lecture.find('p').text.strip()
            le_url = full_url(lecture.find('a').attrs['href'])
            lecture_basis = {
                'lecture_title': le_title,
                'lecture_url': le_url,
            }
            if need_items:
                lecture_basis['lecture_items'] = __items__(opener, le_url)
            lectures.append(lecture_basis)
        chapters.append({
            'chapter_title': ch_title,
            'chapter_lectures': lectures,
        })
    return chapters
def courses_ware(email, password, url):
    """Full courseware tree for a course, including every lecture's items
    (fetches one page per lecture -- slow for large courses)."""
    url = __courseware_url__(url)
    opener = __get_opener__(email, password)
    return __ware__(opener, url, need_items=True)
def video_url(url):
    """Follow redirects anonymously and return the final URL for a video."""
    opener = __get_opener__()
    f = opener.open(url)
    return f.geturl()
|
987,915 | b57f6b1366c26765a1fa2979b574d10bd9693523 | year=int(input("请输入年份"))
month=int(input("请输入月份"))
day=int(input("请输入日子"))
sum=0
i=2000
j=1
k=1
while i<year:
if i%4==0 and i%100==0 or i%400==0:
sum+=366
else:
sum+=365
i+=1
while j<month:
if j==4 or 6 or 9 or 11:
sum+=30
elif j==2:
if year%4==0 and year%100==0 or year%400==0:
sum+=29
else:
sum+=28
else:
sum+=31
j+=1
sum+=day
if sum%5==4 or 0:
print("晒网")
elif sum%5==1 or 2 or 3:
print("打渔") |
987,916 | 49a0b1a18c61bbab6408810d53d4daa341b410fb | import os
def main():
    """For every file in data/, copy its header block plus the first
    'nif:isString' block into request_data/<same name>.

    Blocks are paragraphs separated by blank lines ("\n\n").

    Fixes over the original: `encoding='utf=8'` typo (it only worked because
    codec-name normalisation maps it to utf_8), files were never closed, the
    output file used the platform default encoding, and os.mkdir crashed when
    request_data/ already existed.
    """
    os.makedirs('request_data', exist_ok=True)
    for filename in os.listdir('data'):
        with open('data/' + filename, 'r', encoding='utf-8') as src:
            blocks = src.read().split("\n\n")
        # First block containing the nif:isString triple (IndexError if absent,
        # matching the original behavior of failing loudly on malformed input).
        request_string = [s for s in blocks if 'nif:isString' in s]
        with open('request_data/' + filename, 'w', encoding='utf-8') as out:
            out.write(blocks[0] + '\n\n')
            out.write(request_string[0])

if __name__ == '__main__':
    main()
|
987,917 | e540ab9858ecaa0b2918661096d80fcecd888754 | import os
import mock
from xml.etree import ElementTree
from django.test import TestCase
from casexml.apps.case.mock import CaseFactory
from corehq.util.test_utils import TestFileMixin
from corehq.apps.userreports.sql import IndicatorSqlAdapter
from corehq.apps.userreports.models import StaticDataSourceConfiguration
from corehq.apps.userreports.tasks import rebuild_indicators
from corehq.form_processor.tests.utils import FormProcessorTestUtils
def _safe_text(input_value):
if input_value is None:
return ''
try:
return str(input_value)
except:
return ''
def create_element_with_value(element_name, value):
    """Build an ElementTree element named *element_name* whose text is the
    safely stringified *value* (None becomes empty text)."""
    node = ElementTree.Element(element_name)
    node.text = _safe_text(value)
    return node
class BaseICDSDatasourceTest(TestCase, TestFileMixin):
    """Shared fixture for ICDS UCR data-source tests.

    Loads the static data-source JSON named by `datasource_filename`, binds
    it to its first domain, and can rebuild the indicator table on demand.
    """
    dependent_apps = ['corehq.apps.domain', 'corehq.apps.case']
    file_path = ('data_sources', )
    root = os.path.abspath(os.path.join(os.path.dirname(__file__), os.path.pardir))
    # Patch out the call-center data-source provider so tests don't touch it.
    _call_center_domain_mock = mock.patch(
        'corehq.apps.callcenter.data_source.call_center_data_source_configuration_provider'
    )
    # Subclasses set this to a JSON file name under data_sources/.
    datasource_filename = ''

    @classmethod
    def setUpClass(cls):
        super(BaseICDSDatasourceTest, cls).setUpClass()
        cls._call_center_domain_mock.start()
        cls.static_datasource = StaticDataSourceConfiguration.wrap(
            cls.get_json(cls.datasource_filename)
        )
        # Bind the datasource to the first domain it declares.
        cls.domain = cls.static_datasource.domains[0]
        cls.datasource = StaticDataSourceConfiguration._get_datasource_config(
            cls.static_datasource,
            cls.domain,
        )
        cls.casefactory = CaseFactory(domain=cls.domain)

    @classmethod
    def tearDownClass(cls):
        super(BaseICDSDatasourceTest, cls).tearDownClass()
        cls._call_center_domain_mock.stop()

    def tearDown(self):
        FormProcessorTestUtils.delete_all_cases_forms_ledgers()

    def _rebuild_table_get_query_object(self):
        # Rebuild the indicator table for this datasource, then return a
        # query object over the freshly built rows.
        rebuild_indicators(self.datasource._id)
        adapter = IndicatorSqlAdapter(self.datasource)
        return adapter.get_query_object()
|
987,918 | f7adcb36b00421afc104b95431b36d3e6875cd7a | #homework
def solution(text, ending):
    """Return True if *text* ends with *ending* (an empty *ending* matches)."""
    return text.endswith(ending)

def pstr(output, list):
    """Kept for backward compatibility with the original roundabout check:
    True when the filtered list equals the original list."""
    if output == list:
        return True
    else:
        return False

'''23. Complete the solution so that it returns true if the first argument(string)
passed in ends with the 2nd argument (also a string).
Examples:
solution('abc', 'bc') # returns true
solution('abc', 'd') # returns false'''

if __name__ == '__main__':
    # The old module-level code read input at import time, shadowed the
    # builtins `str` and `list`, and discarded pstr's result without
    # printing anything; now it reads, tests, and reports.
    a = str(input())
    b = str(input())
    print(solution(a, b))
|
987,919 | 58a6ba952501f6bf65b88fdc0197bcbf66725d24 | from pathlib import Path
class Device:
    """Interpreter for a tiny two-register (a, b) assembly machine
    (Advent of Code 2015 day 23 -- the old docstring wrongly said IntCode).

    Instructions: hlf (halve), tpl (triple), inc (increment), jmp (relative
    jump), jie (jump if even), jio (jump if one).
    """

    def __init__(self, data):
        """Parse the program text: one instruction per line, space-separated
        tokens with optional trailing commas; numeric tokens become ints."""
        data = data.split("\n")
        reg_names = "ab"
        self.reg = dict(zip(list(reg_names), [0] * len(reg_names)))
        self.instr_ptr = 0
        self.instrs = []
        for line in data:
            line = line.split(" ")
            for j, val in enumerate(line):
                line[j] = line[j].rstrip(",")
                try:
                    # Parse the comma-stripped token (the original parsed the
                    # raw token, so "2," would have stayed a string).
                    line[j] = int(line[j])
                except ValueError:
                    pass  # opcode or register name: keep as str
            self.instrs.append(line)

    def inc(self, instr):
        """inc r: increment register r by one."""
        reg_name = instr[0]
        self.reg[reg_name] += 1

    def tpl(self, instr):
        """tpl r: triple register r."""
        reg_name = instr[0]
        self.reg[reg_name] *= 3

    def hlf(self, instr):
        """hlf r: halve register r.

        Integer (floor) division: registers are integers, and the original
        `/=` silently turned them into floats.
        """
        reg_name = instr[0]
        self.reg[reg_name] //= 2

    def jmp(self, instr):
        """jmp offset: relative jump (run_prog adds the final +1)."""
        jmp_size = instr[0]
        self.instr_ptr += jmp_size - 1

    def jie(self, instr):
        """jie r, offset: relative jump if register r is even."""
        reg_name = instr[0]
        reg_value = self.reg[reg_name]
        jmp_size = instr[1]
        if (reg_value % 2) == 0:
            self.instr_ptr += jmp_size - 1

    def jio(self, instr):
        """jio r, offset: relative jump if register r equals one (not "odd")."""
        reg_name = instr[0]
        reg_value = self.reg[reg_name]
        jmp_size = instr[1]
        if reg_value == 1:
            self.instr_ptr += jmp_size - 1

    # Opcode -> handler dispatch table.
    operations = {
        "hlf": hlf,
        "tpl": tpl,
        "inc": inc,
        "jmp": jmp,
        "jie": jie,
        "jio": jio,
    }

    def operate(self, op_name, instr):
        """Dispatch one decoded instruction to its handler."""
        op = Device.operations[op_name]
        op(self, instr)

    def run_prog(self, debug=False):
        """Execute until the instruction pointer leaves the program.

        With debug=True, single-step: dump pointer/instruction/registers and
        wait for Enter between steps.
        """
        while 0 <= self.instr_ptr < len(self.instrs):
            instr = self.instrs[self.instr_ptr]
            if debug:
                print(self.instr_ptr)
                print(instr)
                print(self.reg)
                input()
            self.operate(instr[0], instr[1:])
            self.instr_ptr += 1
def main():
    """Run the puzzle program from ./input.txt for both parts and report
    register b (part 2 starts with register a = 1)."""
    data_folder = Path(".").resolve()
    data = data_folder.joinpath("input.txt").read_text()
    print("Part 1:")
    d = Device(data)
    d.run_prog()
    print(
        f"Register b has the value {d.reg['b']} after running the "
        + "program with starting register values a=0, b=0"
    )
    print()
    print("Part 2:")
    # Fresh machine; only the starting value of a differs.
    d = Device(data)
    d.reg["a"] = 1
    d.run_prog()
    print(
        f"Register b has the value {d.reg['b']} after running the "
        + "program with starting register values a=1, b=0"
    )
    print()

if __name__ == "__main__":
    main()
|
987,920 | a2344520779a8863fb84b398261e4c3e3394299a | import speedtest
# Internet speed test: print a banner, then raw download/upload rates.
#
# Fixes over the original, which could not even be parsed:
#   * the f-string spanned two physical lines without triple quotes
#     (SyntaxError);
#   * Credit() used Python-2 `print` statements alongside the f-string;
#   * Space() was never defined (NameError);
#   * the trailing `Speedtest()` call referenced a nonexistent name.

def Space(count):
    """Print *count* spaces without a newline (banner indentation)."""
    print(' ' * count, end='')

def Credit():
    """Print the script banner."""
    Space(9); print("#####################################")
    Space(9); print("# +++ Internet Speed Test +++ #")
    Space(9); print("# Script by WH173 5P1D3R #")
    Space(9); print("#####################################")

def main():
    """Run the test and report raw rates (bits/second, per speedtest-cli)."""
    import speedtest  # third-party; imported lazily so the module loads without it
    test = speedtest.Speedtest()
    download = test.download()
    upload = test.upload()
    print(f"Download Speed : {download}\nUpload Speed : {upload}")

if __name__ == '__main__':
    Credit()
    main()
987,921 | bc3a5964370254f9763e55688a60d2a817e2675e | import numpy as np
import pandas as pd
from scipy import signal
# Demo: box-filter a 3x3 image with a 2x2 averaging mask. 'valid' mode keeps
# only the four fully-overlapping positions, so each output value is the mean
# of a 2x2 neighbourhood.
image = np.arange(1, 10).reshape(3, 3)
mask = np.full((2, 2), 1 / 4)
re = signal.convolve2d(image, mask, boundary='symm', mode='valid')
# print(re)
def conv(il, m_s, i_s):
    """Box-filter (mean) every flattened square image in *il*.

    il:  array-like of shape (n, i_s*i_s), one flattened i_s x i_s image per row.
    m_s: side length of the averaging mask (every tap = 1/m_s**2).
    i_s: side length of each input image.
    Returns an array of shape (n, (i_s - m_s + 1)**2): the flattened 'valid'
    convolutions.

    Fix: the final reshape hard-coded (i_s - 2)**2, which is only right for
    m_s == 3; it now uses the general 'valid' output size, so other mask
    sizes work too.
    """
    length = len(il)
    images = np.asarray(il).reshape(length, i_s, i_s)
    mask = np.full((m_s, m_s), 1 / m_s ** 2)
    out_side = i_s - m_s + 1  # 'valid' mode output side length
    filtered = [
        signal.convolve2d(img, mask, boundary='symm', mode='valid')
        for img in images
    ]
    return np.array(filtered).reshape(length, out_side ** 2)
def data_preparer(path,train_perc):
    '''prepare data before training'''
    '''train set processing'''
    # Shuffle the Kaggle MNIST training CSV and split off the label column.
    train = pd.read_csv(path+"train.csv").sample(frac=1)
    label=train.pop('label') #target
    train_x=np.array(train)
    train_y=np.array(label)
    # Repeatedly average-pool the 28x28 images with a 3x3 mask; each pass
    # shrinks the side by 2 (28 -> 26 -> ... -> 2).
    i=28
    while i>2:
        train_x=conv(train_x,3,i)
        print(train_x.shape)
        i=i-2
        print(i)
    #print(train_x.shape)
    # #print(len(train_x))
    # divide=int(len(train_x)*train_perc)
    # TrainSet=[train_x[0:divide],train_y[0:divide]]
    # if train_perc==1.0:
    # divide=int(len(train_x)*0.7)
    # ValSet=[train_x[divide:-1],train_y[divide:-1]]
    # test = pd.read_csv(path+"test.csv")
    # test = ss.transform(test)
    # test = pca.transform(test)
    # test_x=np.array(test).tolist()
    # id_list=[]
    # for i in range(len(test_x)):
    # id_list.append(i+1)
    # TestSet=[test_x,id_list]
    # '''test set processing'''
    # NOTE(review): TrainSet/ValSet/TestSet are only built by the
    # commented-out code above, so this return raises NameError -- restore
    # that pipeline or return (train_x, train_y) instead.
    return TrainSet,ValSet,TestSet
if __name__ == '__main__':
    # Run the preprocessing end-to-end against the Kaggle input folder.
    data_preparer('../input/',1.0)
    #main()
987,922 | 6b1718be595a10a5f8172400e2926c65c12b7af5 | #!/usr/bin/python
import logging
import os
# File logger for the setup process; writes to ./log/setup.log (the log/
# directory must already exist under the working directory).
current_directory = os.getcwd()
logger = logging.getLogger('CTFsetup')
hdlr = logging.FileHandler(current_directory + '/log/setup.log')
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
def setupDatabase(database): # Set up sqlite database with appropriate tables and columns
    """Create (or open) database/<database> and ensure all CTF tables exist.

    Each CREATE TABLE is attempted independently and failures ("table
    already exists" etc.) are only logged, which makes the setup idempotent.
    Python 2 only (uses the `except Exception, e` syntax).
    """
    import sqlite3
    logger.info("Using database: {0}".format(database)) # log to informational
    try:
        conn = sqlite3.connect('database/' + database) # Setup connection to database
    except Exception, e:
        logger.info(e)
    try:
        # Create user_points table for tracking of users total points
        conn.execute('''CREATE TABLE user_points (uname VARCHAR(32) NOT NULL, tot_points INT);''')
    except Exception, e:
        logger.info(e)
    try:
        # Create user_flags table to track all flags found by user
        conn.execute('''CREATE TABLE user_flags (uname VARCHAR(32) NOT NULL, uuid VARCHAR(37));''')
    except Exception, e:
        logger.info(e)
    try:
        # Create user_messages table to track all messages by user
        conn.execute('''CREATE TABLE user_messages (uname VARCHAR(32) NOT NULL, message VARCHAR(255));''')
    except Exception, e:
        logger.info(e)
    try:
        # Create flags tables to track flags uuid, name, whether or not it's venomous and points
        conn.execute('''CREATE TABLE flags (flagname VARCHAR(32), uuid VARCHAR(37) NOT NULL, points INT NOT NULL, venomous BOOLEAN DEFAULT 0);''')
    except Exception, e:
        logger.info(e)
    try:
        # Create users table for storing of users passwords
        conn.execute('''CREATE TABLE users (uname VARCHAR(32) NOT NULL, password VARCHAR(33) NOT NULL, admin VARCHAR(5) NOT NULL);''')
    except Exception, e:
        logger.info(e)
    try:
        # Create users_salt table for storing of users salt
        conn.execute('''CREATE TABLE users_salt (uname VARCHAR(32) NOT NULL, salt VARCHAR(25) NOT NULL);''')
    except Exception, e:
        logger.info(e)
    logger.info("Tables created in {0}".format(database)) # Log to informational the completion of table creation
    try:
        conn.commit() # Commit all changes
        logger.info("Commit Completed") # Log to informational the completion
    except Exception, e:
        logger.info(e)
    try:
        conn.close() # Close connection to database
        logger.info("Connection to database closed") # Log to informational the closure of connection
    except Exception, e:
        logger.info(e)
# def generate_RSA(bits=2048):
# '''
# Generate an RSA keypair with an exponent of 65537 in PEM format
# param: bits The key length in bits
# Return private key and public key
# '''
#
# from Crypto.PublicKey import RSA
#
# try:
# new_key = RSA.generate(bits, e=65537)
# except Exception, e:
# logger.info(e)
#
# try:
# public_key = new_key.publickey().exportKey("PEM")
# except Exception, e:
# logger.info(e)
#
# try:
# private_key = new_key.exportKey("PEM")
# except Exception, e:
# logger.info(e)
#
# return private_key, public_key
def generate_RSA(bits=2048):
    '''
    Generate an RSA keypair with an exponent of 65537 in PEM format
    param: bits The key length in bits
    Return private key and public key
    '''
    from M2Crypto import RSA, BIO
    new_key = RSA.gen_key(bits, 65537)
    # Serialize both keys through one in-memory BIO.  save_key_bio with
    # cipher=None writes the private key PEM unencrypted.
    memory = BIO.MemoryBuffer()
    new_key.save_key_bio(memory, cipher=None)
    private_key = memory.getvalue()
    # NOTE(review): this assumes getvalue() drains the buffer so the second
    # read returns only the public-key PEM — confirm against M2Crypto docs.
    new_key.save_pub_key_bio(memory)
    return private_key, memory.getvalue()
def checkModules():  # Validate M2Crypto and base64 modules are installed
    """Abort (after logging) if either required module fails to import.

    Uses the 2.6+/3-compatible `except ... as e` syntax; the original
    `except ImportError, e` form is Python-2-only.
    """
    try:
        import M2Crypto
    except ImportError as e:
        logger.info(e)
        logger.warning("M2Crypto module failed to import. Please install.")
        print('M2Crypto module failed to import. Please install.')
        exit()
    try:
        import base64
    except ImportError as e:
        logger.info(e)
        logger.warning("base64 module failed to import. Please install.")
        print('base64 module failed to import. Please install.')
        exit()
987,923 | dcf76f797351c8027fd0f6a830004d0a351892e8 | import numpy as np
import mlrose
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn import preprocessing, datasets
import time
from random import randint
import warnings
def our_fitness_func(state):
    """Four Peaks fitness (t_pct=0.15) that also counts evaluations.

    Increments the module-level `eval_count` on every call so callers can
    compare algorithms by number of fitness-function evaluations; callers
    must initialize `eval_count` before use (compare_algorithms_func_eval
    resets it to 0 before each run).

    NOTE: the original referenced `ml.FourPeaks`, but this module imports
    the library as `mlrose`, so every call raised NameError.
    """
    global eval_count
    fitness = mlrose.FourPeaks(t_pct=0.15)
    eval_count += 1
    return fitness.evaluate(state)
#finding the optimal parameters for rhc
def find_optimal_parameters_rhc(problem, n, name):
    """Tune the number of random restarts for random hill climbing.

    Runs RHC with restarts in {0, 5, 10, 15, 20} on `problem` (bit strings
    of length n) and plots the fitness curves.  The figure is saved under
    ' optimal_rhc<name>.png'.
    """
    init_state = np.random.randint(2, size=n)
    print("RHC started")
    attempts = 1000
    iters = 10000
    fitness_curve_arr = []
    fitness_value = []
    restart_values = range(0, 25, 5)
    for restarts in restart_values:
        best_state, best_fitness, fitness_curve = mlrose.random_hill_climb(
            problem, restarts=restarts, max_attempts=attempts, max_iters=iters,
            init_state=init_state, curve=True)
        fitness_curve_arr.append(fitness_curve)
        fitness_value.append(best_fitness)
    print(fitness_value)
    fitness_value = np.array(fitness_value)
    print(fitness_curve_arr)
    plt.figure()
    plt.grid()
    for curve, restarts in zip(fitness_curve_arr, restart_values):
        plt.plot(curve, label='restarts %d' % restarts)
    plt.xlabel('iterations')
    plt.ylabel('fitness value ')
    plt.legend()
    plt.title('variation of fitness with random restarts')
    # savefig must precede show(): show() clears the current figure in
    # non-interactive mode, so the original saved a blank image.
    plt.savefig(' optimal_rhc' + name + '.png')
    plt.show()
    print("RHC done")
def four_peaks_compare_algorithms(problem ,ga_param, sa_param, rhc_param, mimic_param, name):
    """Compare SA, RHC, GA and MIMIC fitness and runtime across input sizes.

    For each problem length n in range(5, 120, 20) every optimizer is run
    once with its tuned parameters; best fitness and wall-clock time are
    collected per algorithm and plotted against n.

    NOTE(review): the `problem` argument is immediately shadowed inside the
    loop (a fresh DiscreteOpt is built per n), so the parameter is unused.
    NOTE(review): titles/filenames say 'Conti Peaks' / 'ContinuousPeaks'
    although the fitness built here is FourPeaks — presumably copy-paste;
    confirm the intended naming.
    """
    fitness_sa_arr = []
    fitness_rhc_arr = []
    fitness_ga_arr = []
    fitness_mimic_arr = []
    attempts =1000
    iters =20000
    time_sa_arr = []
    time_rhc_arr = []
    time_ga_arr = []
    time_mimic_arr = []
    for n in range(5,120,20):
        fitness = mlrose.FourPeaks(t_pct=0.15)
        print(n,"started")
        problem = mlrose.DiscreteOpt(length = n, fitness_fn = fitness, maximize=True, max_val=2)
        init_state = np.random.randint(2,size=n)
        schedule = mlrose.GeomDecay( 1000, sa_param[0], 1)
        # Time each optimizer individually.
        st = time.time()
        best_state_sa, best_fitness_sa, fitness_curve_sa = mlrose.simulated_annealing(problem, schedule = schedule, max_attempts = attempts, max_iters=iters, init_state = init_state, curve=True)
        end = time.time()
        sa_time = end-st
        st = time.time()
        best_state_rhc, best_fitness_rhc, fitness_curve_rhc = mlrose.random_hill_climb(problem, max_attempts = attempts,restarts=rhc_param[0], max_iters=iters, init_state = init_state, curve=True)
        end = time.time()
        rhc_time = end-st
        st = time.time()
        best_state_ga, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(problem, max_attempts = attempts,
                                                                              max_iters=iters, curve=True, pop_size=ga_param[0], mutation_prob=ga_param[1])
        end = time.time()
        ga_time = end-st
        st = time.time()
        best_state_mimic, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(problem,pop_size=mimic_param[0], max_attempts = attempts,
                                                                                 max_iters=iters,keep_pct=mimic_param[1], curve=True, fast_mimic=True)
        end = time.time()
        mimic_time = end-st
        print(mimic_time,n)
        print(n,"done")
        fitness_sa_arr.append(best_fitness_sa)
        fitness_rhc_arr.append(best_fitness_rhc)
        fitness_ga_arr.append(best_fitness_ga)
        fitness_mimic_arr.append(best_fitness_mimic)
        time_sa_arr.append(sa_time)
        time_rhc_arr.append(rhc_time)
        time_ga_arr.append(ga_time)
        time_mimic_arr.append(mimic_time)
    fitness_sa_arr = np.array(fitness_sa_arr)
    fitness_rhc_arr = np.array(fitness_rhc_arr)
    fitness_ga_arr = np.array(fitness_ga_arr)
    fitness_mimic_arr = np.array(fitness_mimic_arr)
    time_sa_arr = np.array(time_sa_arr)
    time_rhc_arr = np.array(time_rhc_arr)
    time_ga_arr = np.array(time_ga_arr)
    time_mimic_arr = np.array(time_mimic_arr)
    # Fitness vs. input size.
    plt.figure()
    plt.plot(np.arange(5,120,20),fitness_sa_arr,label='SA')
    plt.plot(np.arange(5,120,20),fitness_rhc_arr,label = 'RHC')
    plt.plot(np.arange(5,120,20),fitness_ga_arr, label = 'GA')
    plt.plot(np.arange(5,120,20),fitness_mimic_arr, label = 'MIMIC')
    plt.xlabel('Input Size')
    plt.ylabel('Fitness Vaue')
    plt.legend()
    plt.title('Fitness Value vs. Input Size (Conti Peaks)')
    plt.savefig('ContinuousPeaks_input_size_fitness.png')
    plt.show()
    # Wall-clock time vs. input size.
    plt.figure()
    plt.plot(np.arange(5,120,20),time_sa_arr,label='SA')
    plt.plot(np.arange(5,120,20),time_rhc_arr,label='RHC')
    plt.plot(np.arange(5,120,20),time_ga_arr,label='GA')
    plt.plot(np.arange(5,120,20),time_mimic_arr,label='MIMIC')
    plt.legend()
    plt.xlabel('Input Size')
    plt.ylabel('Computation Time (s)')
    plt.title('Computation Time vs. Input Size (Conti Peaks)')
    plt.savefig('continuousPeaks_input_size_computation.png')
    plt.show()
def find_optimal_parameters_ga_pop(problem, name):
    """Grid-search GA population size and mutation probability.

    Runs the genetic algorithm for every combination of population size
    {200, 500} and mutation probability {0.001, 0.01}, then plots all four
    fitness curves.  The stray breakpoint() that froze every run and the
    savefig-after-show ordering (which saved a blank figure) are fixed.
    """
    print("GA Started")
    population_size = [200, 500]
    attempts = 1000
    iters = 10000
    fitness_value = []
    fitness_curve_arr = []
    # Iteration order matches the plot labels below: mutation prob outer,
    # population size inner.
    for mutation_prob in (0.001, 0.01):
        for p in population_size:
            best_state, best_fitness_ga, fitness_curve = mlrose.genetic_alg(
                problem, pop_size=p, mutation_prob=mutation_prob,
                max_attempts=attempts, max_iters=iters, curve=True)
            fitness_value.append(best_fitness_ga)
            fitness_curve_arr.append(fitness_curve)
    print(fitness_value)
    print(fitness_curve_arr)
    fitness_value = np.array(fitness_value)
    plt.figure()
    plt.grid()
    labels = (' pop 200: mutation_prob:0.001', ' pop 500: mutation_prob:0.001',
              ' pop 200: mutation_prob:0.01', ' pop 500: mutation_prob:0.01')
    for curve, label in zip(fitness_curve_arr, labels):
        plt.plot(curve, label=label)
    plt.legend()
    plt.xlabel('iterations')
    plt.ylabel('fitness value ')
    plt.title('variation of fitness with Mutation and population size')
    plt.savefig(' optimal_ga' + name + '.png')  # save before show(): show() clears the figure
    plt.show()
    print("GA Done")
def sa_different_schedule(problem, name, n):
    """Plot SA fitness curves for geometric, arithmetic and exponential decay.

    Runs simulated annealing once per default decay schedule on `problem`
    (bit strings of length n) and saves the comparison plot.
    """
    fitness_curve_arr =[]
    fitness_values =[]
    schedule =[ mlrose.GeomDecay(),mlrose.ArithDecay(),mlrose.ExpDecay()]
    init_state = np.random.randint( 2, size=n)
    for s in schedule:
        best_state, best_fitness, fitness_curve = mlrose.simulated_annealing(problem, schedule = s,
                                                                             max_attempts = 1000, max_iters=10000, init_state = init_state, curve=True)
        fitness_curve_arr.append( fitness_curve)
        fitness_values.append( best_fitness)
    print( fitness_values)
    plt.figure()
    plt.grid()
    # Curve order matches the schedule list above.
    plt.plot(fitness_curve_arr[0],label='Geom')
    plt.plot(fitness_curve_arr[1],label = 'Arith')
    plt.plot(fitness_curve_arr[2], label = 'EXP')
    plt.xlabel('iterations')
    plt.ylabel('fitness values')
    plt.legend()
    plt.title('Fitness values vs. Different schedule')
    plt.savefig(name+'sa_optimum_diff_schedules.png')
    plt.show()
def find_optimal_parameters_sa(problem, n, name):
    """Tune the geometric cooling exponent for simulated annealing.

    Runs SA with GeomDecay(10000, r, 1) for r in {0.65, 0.7, 0.8, 0.9, 0.95}
    and plots the fitness curves.  savefig is moved before show() (the
    original saved a blank figure) and the 'colling' title typo is fixed.
    """
    print("SA Started")
    init_state = np.random.randint(2, size=n)
    decay = [0.65, 0.7, 0.8, 0.9, 0.95]
    fitness_value = []
    fitness_curve_arr = []
    for r in decay:
        schedule = mlrose.GeomDecay(10000, r, 1)
        best_state, best_fitness, fitness_curve = mlrose.simulated_annealing(
            problem, schedule=schedule, max_attempts=2000, max_iters=100000,
            init_state=init_state, curve=True)
        fitness_value.append(best_fitness)
        fitness_curve_arr.append(fitness_curve)
    fitness_value = np.array(fitness_value)
    print(fitness_value)
    plt.figure()
    plt.grid()
    for curve, r in zip(fitness_curve_arr, decay):
        plt.plot(curve, label='r:%s' % r)
    plt.legend()
    plt.xlabel('iterations')
    plt.ylabel('fitness value ')
    plt.title('variation of fitness with various cooling exponents')
    plt.savefig(' optimal_sa' + name + '.png')  # save before show(): show() clears the figure
    plt.show()
    print("SA done")
def find_optimal_parameters_mimic(problem, n, name):
    """Grid-search MIMIC population size and keep_pct.

    Runs MIMIC for every combination of population size {200, 500} and
    keep_pct {0.1, 0.2, 0.5} and plots all six fitness curves.

    NOTE: the original called mlrose.mimic without curve=True, unpacked only
    two return values, and then appended an undefined `fitness_curve` —
    every call raised NameError.  It also called savefig after show(),
    saving a blank figure.  Both are fixed here.
    """
    print("Mimic Started")
    population_size = [200, 500]
    fitness_values = []
    fitness_curve_arr = []
    # Iteration order matches the plot labels below: keep_pct outer,
    # population size inner.
    for keep_pct in (0.1, 0.2, 0.5):
        for p in population_size:
            best_state, best_fitness, fitness_curve = mlrose.mimic(
                problem, pop_size=p, keep_pct=keep_pct, max_attempts=1000,
                max_iters=10000, curve=True, fast_mimic=True)
            fitness_values.append(best_fitness)
            fitness_curve_arr.append(fitness_curve)
    fitness_values = np.array(fitness_values)
    print(fitness_values)
    plt.figure()
    plt.grid()
    labels = (' pop 200: keep pct 0.1', ' pop 500: keep pct 0.1',
              ' pop 200: keep pct 0.2', ' pop 500: keep pct 0.2',
              ' pop 200: keep pct 0.5', ' pop 500: keep pct 0.5')
    for curve, label in zip(fitness_curve_arr, labels):
        plt.plot(curve, label=label)
    plt.legend()
    plt.xlabel('iterations')
    plt.ylabel('fitness value ')
    plt.title('variation of fitness with Mutation and keep pct values ')
    plt.savefig(' optimal_mimic_' + name + '.png')  # save before show()
    plt.show()
    print("Mimic Done")
def compare_algorithms_iterations(problem, ga_param, sa_param, rhc_param, mimic_param, name, n):
    """Run each optimizer once on `problem` and plot fitness vs. iterations.

    ga_param = [pop_size, mutation_prob]; sa_param = [geom decay rate];
    rhc_param = [restarts]; mimic_param = [pop_size, keep_pct].
    Also prints each optimizer's wall-clock time.
    """
    attempts = 1000
    iters = 1000
    schedule = mlrose.GeomDecay(1000, sa_param[0], 1)
    init_state = np.random.randint(2, size=n)  # was a doubled `init_state = init_state = ...`
    st = time.time()
    print(" Started")
    best_state, best_fitness_ga, fitness_curve_ga = mlrose.genetic_alg(problem, pop_size=ga_param[0], mutation_prob=ga_param[1], max_attempts=attempts, max_iters=iters, curve=True)
    et = time.time()
    ga_time = et - st
    print("Genetic done")
    st = time.time()
    best_state, best_fitness_sa, fitness_curve_sa = mlrose.simulated_annealing(problem, schedule=schedule, init_state=init_state, max_attempts=attempts, max_iters=iters, curve=True)
    et = time.time()
    sa_time = et - st
    print(" SA done")
    st = time.time()
    best_state, best_fitness_rhc, fitness_curve_rhc = mlrose.random_hill_climb(problem, restarts=rhc_param[0], init_state=init_state, max_attempts=attempts, max_iters=iters, curve=True)
    et = time.time()
    rhc_time = et - st
    print(" RHC done")
    st = time.time()
    best_state, best_fitness_mimic, fitness_curve_mimic = mlrose.mimic(problem, pop_size=mimic_param[0], keep_pct=mimic_param[1], max_attempts=attempts, max_iters=iters, curve=True, fast_mimic=True)
    et = time.time()
    mimic_time = et - st
    print(" ALL done ")
    print(ga_time, sa_time, rhc_time, mimic_time)
    plt.figure()
    plt.plot(fitness_curve_sa, label='SA')
    plt.plot(fitness_curve_rhc, label='RHC')
    plt.plot(fitness_curve_ga, label='GA')
    plt.plot(fitness_curve_mimic, label='MIMIC')
    plt.xlabel('iterations ')
    plt.ylabel('fitness values ')
    plt.legend()
    plt.title('fitness values vs. iterations' + name)
    #plt.savefig(name+'fitness_VS_iterations.png')
    plt.show()
def compare_algorithms_tpct( ga_param, sa_param, rhc_param, mimic_param, name):
    """Compare the four optimizers while varying the Four Peaks t_pct.

    A fresh length-100 problem is built for each t_pct in {0.1..0.5}; the
    best fitness per algorithm is recorded and plotted against t_pct.

    NOTE(review): init_state is built once (length 100) outside the loop —
    fine here because every problem also has length 100.
    """
    T_value = [0.1, 0.2, 0.3, 0.4, 0.5]
    init_state = np.random.randint( 2, size=100)
    attempts =1000
    iters =1000
    fitness_ga =[]
    fitness_sa =[]
    fitness_rhc = []
    fitness_mimic = []
    schedule = mlrose.GeomDecay( 1000, sa_param[0], 1)
    for t in T_value:
        fitness = mlrose.FourPeaks( t_pct =t)
        print(t)
        problem = mlrose.DiscreteOpt(length = 100, fitness_fn = fitness, maximize=True, max_val=2)
        best_state, best_fitness_ga = mlrose.genetic_alg( problem, pop_size = ga_param[0], mutation_prob=ga_param[1],
                                                          max_attempts=attempts, max_iters=iters )
        fitness_ga.append( best_fitness_ga)
        print('ga done')
        best_state, best_fitness_sa = mlrose.simulated_annealing( problem , schedule = schedule, init_state=init_state,
                                                                  max_attempts=attempts, max_iters=iters)
        fitness_sa.append( best_fitness_sa)
        print('sa done')
        best_state, best_fitness_rhc = mlrose.random_hill_climb( problem, init_state=init_state, restarts=rhc_param[0],
                                                                 max_attempts=attempts, max_iters=iters)
        fitness_rhc.append( best_fitness_rhc)
        print('rhc done')
        best_state, best_fitness_mimic = mlrose.mimic( problem, pop_size=mimic_param[0], keep_pct=mimic_param[1],
                                                       max_attempts=attempts, max_iters=iters, fast_mimic=True)
        fitness_mimic.append( best_fitness_mimic)
    print('loop completed')
    plt.figure()
    plt.xlabel(' t_pct values ')
    plt.ylabel(' best fitness value ')
    plt.plot( T_value, fitness_ga, label='GA')
    plt.plot( T_value, fitness_sa, label='SA')
    plt.plot( T_value, fitness_rhc, label='RHC')
    plt.plot( T_value, fitness_mimic, label='MIMIC')
    plt.legend()
    plt.title( ' t_pct values variation with fitness')
    plt.savefig('4peaks_tpct_fitnesss.png')
    plt.show()
def compare_algorithms_func_eval(problem, ga_param, sa_param, rhc_param, mimic_param, name):
    """Compare the four optimizers by number of fitness-function evaluations.

    Uses our_fitness_func, which increments the module-level `eval_count`
    on every evaluation; the counter is reset before each algorithm run.

    NOTE: the original built init_state before the loop from an undefined
    `n` (NameError) and reset `eval_count` without a `global` declaration,
    so it read a shadowing local 0 instead of the real counter.  Both fixed.
    """
    global eval_count  # our_fitness_func bumps the *global*; resets below must target it
    func_eval_ga = []
    func_eval_sa = []
    func_eval_mimic = []
    func_eval_rhc = []
    schedule = mlrose.GeomDecay(1000, sa_param[0], 1)
    for n in range(40, 101, 10):
        # init_state must match the current problem length, so build it here.
        init_state = np.random.randint(2, size=n)
        fitness = mlrose.CustomFitness(our_fitness_func)
        problem = mlrose.DiscreteOpt(length=n, fitness_fn=fitness, maximize=True)
        eval_count = 0
        best_state, best_fitness = mlrose.genetic_alg(problem, pop_size=ga_param[0], mutation_prob=ga_param[1], max_attempts=1000, max_iters=100000)
        func_eval_ga.append(eval_count)
        eval_count = 0
        best_state, best_fitness = mlrose.simulated_annealing(problem, schedule=schedule, max_attempts=1000, max_iters=100000, init_state=init_state)
        func_eval_sa.append(eval_count)
        eval_count = 0
        best_state, best_fitness = mlrose.mimic(problem, pop_size=mimic_param[0], keep_pct=mimic_param[1], max_attempts=1000, max_iters=100000)
        func_eval_mimic.append(eval_count)
        eval_count = 0
        best_state, best_fitness = mlrose.random_hill_climb(problem, restarts=rhc_param[0], init_state=init_state, max_attempts=1000, max_iters=100000)
        func_eval_rhc.append(eval_count)
    plt.figure()
    plt.plot(np.arange(40, 101, 10), func_eval_sa, label='SA')
    plt.plot(np.arange(40, 101, 10), func_eval_rhc, label='RHC')
    plt.plot(np.arange(40, 101, 10), func_eval_ga, label='GA')
    plt.plot(np.arange(40, 101, 10), func_eval_mimic, label='MIMIC')
    plt.xlabel('problem size ')
    plt.ylabel('function evaluations')
    plt.legend()
    plt.title('Function evalutaions vs. Input Size (4 Peaks)')
    plt.savefig(name + 'func_eval_VS_input_size_fitness.png')
    plt.show()
def continuous_peaks():
    """Configure the Continuous Peaks experiment with the tuned parameters.

    All experiment drivers are left commented out; uncomment the one to run.
    The two stray breakpoint() calls that froze every run are removed.
    """
    n = 100
    fitness = mlrose.ContinuousPeaks(t_pct=0.15)
    problem = mlrose.DiscreteOpt(length=n, fitness_fn=fitness, maximize=True, max_val=2)
    # Best parameters found by the tuning helpers above.
    ga_param = [500, 0.1]     # [population size, mutation probability]
    sa_param = [0.85]         # [geometric decay rate]
    mimic_param = [500, 0.2]  # [population size, keep_pct]
    rhc_param = [15]          # [restarts]
    #compare_algorithms_iterations(problem, ga_param, sa_param, rhc_param, mimic_param,'continuouspeaks', n)
    #compare_algorithms_tpct( ga_param, sa_param, rhc_param, mimic_param, 'continuousPeaks')
    #four_peaks_compare_algorithms(problem ,ga_param, sa_param, rhc_param, mimic_param, 'continuousPeaks')
    #sa_different_schedule( problem, 'continuousPeaks', 100)
    #find_optimal_parameters_ga_pop(problem, 'continuousPeaks')
    #find_optimal_parameters_rhc( problem, 100, 'continuousPeaks')
    #find_optimal_parameters_sa( problem, 100, 'continuousPeaks')
    #find_optimal_parameters_mimic( problem, 100, 'continuousPeaks')


continuous_peaks()
|
987,924 | 98ceb806a8f412afde707f2559c2ec99e709d21f | import sys
from gam.var import *
from gam import controlflow
from gam import display
from gam import gapi
from gam.gapi import directory as gapi_directory
from gam import utils
def create():
    """Create a domain alias from the command line.

    Expects sys.argv[3] = alias name and sys.argv[4] = parent domain.
    """
    directory = gapi_directory.build()
    body = {
        'domainAliasName': sys.argv[3],
        'parentDomainName': sys.argv[4],
    }
    print(f'Adding {body["domainAliasName"]} alias for ' \
          f'{body["parentDomainName"]}')
    gapi.call(directory.domainAliases(), 'insert',
              customer=GC_Values[GC_CUSTOMER_ID], body=body)
def delete():
    """Delete the domain alias named in sys.argv[3]."""
    directory = gapi_directory.build()
    domainAliasName = sys.argv[3]
    print(f'Deleting domain alias {domainAliasName}')
    gapi.call(directory.domainAliases(), 'delete',
              customer=GC_Values[GC_CUSTOMER_ID],
              domainAliasName=domainAliasName)
def info():
    """Fetch the domain alias named in sys.argv[3] and print it as JSON."""
    directory = gapi_directory.build()
    alias = sys.argv[3]
    result = gapi.call(directory.domainAliases(), 'get',
                       customer=GC_Values[GC_CUSTOMER_ID],
                       domainAliasName=alias)
    # Render the raw timestamp human-readable before printing.
    creation = result.get('creationTime')
    if creation is not None:
        result['creationTime'] = utils.formatTimestampYMDHMSF(creation)
    display.print_json(result)
def print_():
    """Print all domain aliases as a CSV report (optionally to Drive).

    Recognized command-line argument: 'todrive'.  Any other argument aborts
    with a usage error via controlflow.invalid_argument_exit.
    """
    cd = gapi_directory.build()
    todrive = False
    titles = [
        'domainAliasName',
    ]
    csvRows = []
    i = 3
    while i < len(sys.argv):
        myarg = sys.argv[i].lower()
        if myarg == 'todrive':
            todrive = True
            i += 1
        else:
            controlflow.invalid_argument_exit(sys.argv[i],
                                              'gam print domainaliases')
    results = gapi.call(cd.domainAliases(),
                        'list',
                        customer=GC_Values[GC_CUSTOMER_ID])
    # Default to an empty list: the API response can lack the
    # 'domainAliases' key entirely when no aliases exist, which made the
    # original raise KeyError.
    for domainAlias in results.get('domainAliases', []):
        domainAlias_attributes = {}
        for attr in domainAlias:
            if attr in ['kind', 'etag']:  # API bookkeeping fields, not data
                continue
            if attr == 'creationTime':
                domainAlias[attr] = utils.formatTimestampYMDHMSF(
                    domainAlias[attr])
            if attr not in titles:
                titles.append(attr)
            domainAlias_attributes[attr] = domainAlias[attr]
        csvRows.append(domainAlias_attributes)
    display.write_csv_file(csvRows, titles, 'Domains', todrive)
|
987,925 | 4702a3f4992ab18dfc9456fb75d63fb73d0eb7d2 | from .common import json2dict
from .dicttree import DictTree
|
987,926 | 45f674f5173cb5575b8267aad794408b16b3d4b4 | from flask import Flask, Blueprint, render_template
# Blueprint grouping the site's home-page routes.
bp = Blueprint("HomeController", __name__)


@bp.route("/")
@bp.route("/index")
def index():
    """Render the landing page for both '/' and '/index'."""
    return render_template("index.html")
987,927 | 8c30f0b8e896652d302d2bf94e4921e02f7f6ebb | # coding: utf-8
import dataclasses
import typing
import serpyco
from sqlalchemy.orm.exc import NoResultFound
from guilang.description import Description
from guilang.description import Part
from guilang.description import Type
from rolling.action.base import WithResourceAction
from rolling.action.base import WithStuffAction
from rolling.action.base import get_with_resource_action_url
from rolling.action.base import get_with_stuff_action_url
from rolling.exception import ImpossibleAction
from rolling.rolling_types import ActionType
from rolling.server.link import CharacterActionLink
from rolling.server.util import with_multiple_carried_stuffs
from rolling.util import EmptyModel
if typing.TYPE_CHECKING:
from rolling.model.character import CharacterModel
from rolling.model.stuff import StuffModel
from rolling.game.base import GameConfig
from rolling.kernel import Kernel
@dataclasses.dataclass
class DropResourceModel:
    """Serpyco input payload for DropResourceAction.

    quantity: amount to drop; None makes perform() return the quantity form.
    """
    quantity: typing.Optional[float] = serpyco.number_field(cast_on_load=True, default=None)


@dataclasses.dataclass
class DropStuffModel:
    """Serpyco input payload for DropStuffAction.

    quantity: number of identical items to drop; None presumably triggers a
    quantity prompt inside with_multiple_carried_stuffs — confirm there.
    """
    quantity: typing.Optional[int] = serpyco.number_field(cast_on_load=True, default=None)
class DropStuffAction(WithStuffAction):
    """Action letting a character drop a carried stuff item on the ground."""

    input_model: typing.Type[DropStuffModel] = DropStuffModel
    input_model_serializer = serpyco.Serializer(DropStuffModel)

    def check_is_possible(self, character: "CharacterModel", stuff: "StuffModel") -> None:
        """Raise ImpossibleAction unless the character carries this stuff."""
        if stuff.carried_by != character.id:
            raise ImpossibleAction("Vous ne possedez pas cet objet")

    def check_request_is_possible(
        self, character: "CharacterModel", stuff: "StuffModel", input_: input_model
    ) -> None:
        """Request-time check: same carried-by condition as check_is_possible."""
        self.check_is_possible(character, stuff)

    @classmethod
    def get_properties_from_config(cls, game_config: "GameConfig", action_config_raw: dict) -> dict:
        """This action takes no extra configuration properties."""
        return {}

    def get_character_actions(
        self, character: "CharacterModel", stuff: "StuffModel"
    ) -> typing.List[CharacterActionLink]:
        """Build the single 'leave here' link shown for this stuff item."""
        actions: typing.List[CharacterActionLink] = [
            CharacterActionLink(
                name=f"Laisser {stuff.name} ici",
                link=get_with_stuff_action_url(
                    character_id=character.id,
                    action_type=ActionType.DROP_STUFF,
                    stuff_id=stuff.id,
                    query_params={},
                    action_description_id=self._description.id,
                ),
                cost=self.get_cost(character, stuff),
            )
        ]
        return actions

    def perform(
        self, character: "CharacterModel", stuff: "StuffModel", input_: DropStuffModel
    ) -> Description:
        """Drop the stuff at the character's current tile (handles multiples)."""
        def do_for_one(
            character_: "CharacterModel", stuff_: "StuffModel", input__: DropStuffModel
        ) -> typing.List[Part]:
            # Place the item at the character's exact world/zone coordinates.
            self._kernel.stuff_lib.drop(
                stuff_.id,
                world_row_i=character_.world_row_i,
                world_col_i=character_.world_col_i,
                zone_row_i=character_.zone_row_i,
                zone_col_i=character_.zone_col_i,
            )
            return [Part(text=f"{stuff_.name} laissé ici")]

        # Delegates iteration over duplicate items (and quantity handling)
        # to the shared helper.
        return with_multiple_carried_stuffs(
            self,
            self._kernel,
            character=character,
            stuff=stuff,
            input_=input_,
            action_type=ActionType.DROP_STUFF,
            do_for_one_func=do_for_one,
            title="Laisser quelque-chose ici",
            success_parts=[
                Part(is_link=True, go_back_zone=True, label="Retourner à l'écran de déplacements"),
                Part(
                    is_link=True,
                    label="Voir l'inventaire",
                    form_action=f"/_describe/character/{character.id}/inventory",
                    classes=["primary"],
                ),
            ],
        )
class DropResourceAction(WithResourceAction):
    """Action letting a character drop part of a carried resource."""

    input_model: typing.Type[DropResourceModel] = DropResourceModel
    input_model_serializer = serpyco.Serializer(input_model)

    def check_is_possible(self, character: "CharacterModel", resource_id: str) -> None:
        """Raise ImpossibleAction unless the character carries this resource."""
        if not self._kernel.resource_lib.have_resource(character.id, resource_id):
            raise ImpossibleAction("Vous ne possedez pas cette resource")

    def check_request_is_possible(
        self, character: "CharacterModel", resource_id: str, input_: input_model
    ) -> None:
        """Raise ImpossibleAction unless the requested quantity is carried."""
        if not self._kernel.resource_lib.have_resource(
            character.id, resource_id, quantity=input_.quantity
        ):
            raise ImpossibleAction("Vous ne possedez pas assez de cette resource")

    @classmethod
    def get_properties_from_config(cls, game_config: "GameConfig", action_config_raw: dict) -> dict:
        """This action takes no extra configuration properties."""
        return {}

    def get_character_actions(
        self, character: "CharacterModel", resource_id: str
    ) -> typing.List[CharacterActionLink]:
        """Build the single 'leave some here' link for the carried resource."""
        # TODO BS 2019-09-09: perfs
        carried_resources = self._kernel.resource_lib.get_carried_by(character.id)
        # NOTE(review): next() without a default raises StopIteration when the
        # resource is not carried — confirm callers guarantee presence.
        carried_resource = next((r for r in carried_resources if r.id == resource_id))
        actions: typing.List[CharacterActionLink] = [
            CharacterActionLink(
                name=f"Laisser de {carried_resource.name} ici",
                link=get_with_resource_action_url(
                    character_id=character.id,
                    action_type=ActionType.DROP_RESOURCE,
                    resource_id=carried_resource.id,
                    query_params={},
                    action_description_id=self._description.id,
                ),
                cost=None,
            )
        ]
        return actions

    def perform(
        self, character: "CharacterModel", resource_id: str, input_: input_model
    ) -> Description:
        """Ask for a quantity (first pass) then drop it at the character's tile."""
        # TODO BS 2019-09-09: perfs
        carried_resources = self._kernel.resource_lib.get_carried_by(character.id)
        carried_resource = next((r for r in carried_resources if r.id == resource_id))
        if input_.quantity is None:
            # First request carries no quantity: answer with a form asking
            # how much to drop (defaulting to everything carried).
            unit_trans = self._kernel.translation.get(carried_resource.unit)
            return Description(
                title=carried_resource.get_full_description(self._kernel),
                items=[
                    Part(
                        is_form=True,
                        form_values_in_query=True,
                        form_action=get_with_resource_action_url(
                            character_id=character.id,
                            action_type=ActionType.DROP_RESOURCE,
                            resource_id=resource_id,
                            query_params={},
                            action_description_id=self._description.id,
                        ),
                        items=[
                            Part(
                                label=f"Quantité à laisser ici ({unit_trans}) ?",
                                type_=Type.NUMBER,
                                name="quantity",
                                default_value=str(carried_resource.quantity),
                            )
                        ],
                    )
                ],
            )
        # Quantity known: drop it at the character's exact coordinates.
        self._kernel.resource_lib.drop(
            character.id,
            resource_id,
            quantity=input_.quantity,
            world_row_i=character.world_row_i,
            world_col_i=character.world_col_i,
            zone_row_i=character.zone_row_i,
            zone_col_i=character.zone_col_i,
        )
        return Description(
            title=f"Action effectué",
            footer_links=[
                Part(is_link=True, go_back_zone=True, label="Retourner à l'écran de déplacements"),
                Part(
                    is_link=True,
                    label="Voir l'inventaire",
                    form_action=f"/_describe/character/{character.id}/inventory",
                    classes=["primary"],
                ),
            ],
        )
|
987,928 | d4283c3a002794545865b0babd5cad25600ac1b6 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.datasets import load_wine # load_wine() -> I can't load data and target from it ?¿ :(
"""
In this sample we will try to categorize some wines of which we have some features
Unlike classification, in this case we DON'T HAVE LABELS FOR DATA (UNSUPERVISED), we have to 'find' them!
To achieve this, we will use a CENTROID BASED model: K-MEANS. This method performs 4 steps (automatically with sklearn):
1) Pick k random points as centroids (cluster centers)
2) Assign each datapoint to the nearest centroid (in this case we use Euclidean distance, but others could be used)
3) Once all points are assigned, calculate each cluster centroid.
4) Repeat steps 2 and 3 with calculated centroids until none cluster changes (or max. repetitions reaches)
"""
#region LOAD DATA
'''
# To load dataset from CSV
dataset_url = 'http://mlr.cs.umass.edu/ml/machine-learning-databases/wine-quality/winequality-red.csv'
data = pd.read_csv(dataset_url, sep=';')
'''
# Load the sklearn wine Bunch (data, target, feature_names, DESCR, ...).
dataset_wine = load_wine()
# Show dataset info
print('Dataset keys')
print(dataset_wine.keys())
#print(dataset_wine) # Print whole Bunch object
print(dataset_wine['DESCR']) # Description
# Obtain and show data (and target, to compare results)
data = pd.DataFrame(dataset_wine['data']) # <class 'pandas.core.frame.DataFrame'>
target = pd.DataFrame(dataset_wine['target']) # <class 'pandas.core.frame.DataFrame'>
print('Feature names')
print(dataset_wine['feature_names'])
print('\n')
print(data.head())
print('\nData shape')
print(data.shape)
print('Target shape')
print(target.shape)
#endregion load data
#region ANALYZE AND PREPARE DATA
# NOTE(review): analysis/KMeans fitting not yet implemented (region is empty).
#endregion
987,929 | 55ff033b632720abc3401e70ef4f4bba7d0aee74 | import unittest
from kalk import Kalk
class KalkTest(unittest.TestCase):
    """Unit tests for Kalk.add (string-calculator kata)."""

    def testOnEmptyString(self):
        self.assertEqual(0, Kalk.add(""))

    def testASingleNumber(self):
        self.assertEqual(42, Kalk.add('42'))

    def testTwoNumbers(self):
        self.assertEqual(42, Kalk.add('40,2'))

    def testManyNumbers(self):
        self.assertEqual(42, Kalk.add('20,20,2'))

    def testWithSpaces(self):
        self.assertEqual(42, Kalk.add('20 , 20, 2'))

    def testWithNewlines(self):
        self.assertEqual(42, Kalk.add('20 \n 20\n 2'))

    def testWithCommasNewlines(self):
        self.assertEqual(42, Kalk.add('20 , 20\n 2'))

    def testWithNegatives(self):
        # Raw string for the regex: "\d" inside a plain literal is an invalid
        # escape sequence (DeprecationWarning now, an error in future Python).
        self.assertRaisesRegex(ValueError, r"Negative numbers are not allowed: (-\d+(, )?)+",
                               Kalk.add, '20, -20, -10, 2')

    def testWithCustomDelimiters(self):
        self.assertEqual(42, Kalk.add("//#\n20,10#5\n5#2"))


if __name__ == '__main__':
    unittest.main()
987,930 | 90c52167c2572cd841695841cf7b325985d2518d | # Generated by Django 2.0.6 on 2018-06-30 09:40
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add solve-statistics counters to the Challenge model.

    Auto-generated by Django 2.0.6; applied migrations must not be edited.
    """

    dependencies = [
        ('challenges', '0006_challenge_classes_list'),
    ]

    operations = [
        # All three counters default to zero for existing rows.
        migrations.AddField(
            model_name='challenge',
            name='times_fail',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='challenge',
            name='times_solved',
            field=models.PositiveIntegerField(default=0),
        ),
        migrations.AddField(
            model_name='challenge',
            name='times_tried',
            field=models.PositiveIntegerField(default=0),
        ),
    ]
|
987,931 | 3a5fd6a60da46d16eabb67a2b4eb2002954ac0a2 | list = [23,2,53,1,10]
for b in list:
if b < 5:
print (b)
|
987,932 | 09c7c4a65ddc2fa970ac5a9e037b79dc7ce06f05 | """
* Copyright 2020, Departamento de sistemas y Computación, Universidad
* de Los Andes
*
*
* Desarrolado para el curso ISIS1225 - Estructuras de Datos y Algoritmos
*
*
* This program is free software: you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation, either version 3 of the License, or
* (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along withthis program. If not, see <http://www.gnu.org/licenses/>.
"""
import config as cf
import sys
import controller
from DISClib.ADT import list as lt
from DISClib.ADT import map
assert cf
"""
La vista se encarga de la interacción con el usuario
Presenta el menu de opciones y por cada seleccion
se hace la solicitud al controlador para ejecutar la
operación solicitada
"""
def printMenu():
    """Display the main menu of the art-catalog console application."""
    menu_lines = (
        "Bienvenido",
        "0- Cargar información en el catálogo",
        "1- Listar las n obras más antiguas de un medio",
        "2- Listar cronológicamente los artistas para un rango de años",
        "3- Listar cronológicamente las adquisiciones",
        "4- Clasificar las obras de un artista por técnica",
        "5- Clasificar las obras por la nacionalidad de sus creadores",
        "6- Transportar obras de un departamento",
        "7- Encontrar el número total de obras para una nacionalidad",
        "8- Salir",
    )
    for line in menu_lines:
        print(line)
def printLastArtists(Artists):
    """Print name and biography for the last three artists in the list."""
    last_three = lt.subList(Artists, lt.size(Artists) - 4, 3)
    for position, artist in enumerate(lt.iterator(last_three), start=1):
        print(str(position) + '. Name: ' + artist['DisplayName'] + ',',
              'Biography:', artist['ArtistBio'] + '.')
def printLastArtworks(Artworks):
LastArtworks = lt.subList(Artworks,lt.size(Artworks)-4,3)
i = 1
for Artwork in lt.iterator(LastArtworks):
print(str(i) + '. Title: ' + Artwork['Title'] +',', 'Date:', Artwork['Date'] +',',
'Medium:', Artwork['Medium'] +',', 'Classification:', Artwork['Classification'] + '.')
i+=1
#Requirement 0
def printReq0Answer(sorted_artworks,artists,n):
print('Las',str(n),'obras más antiguas son:\n')
if lt.size(sorted_artworks) > n:
sorted_artworks = lt.subList(sorted_artworks,1,n)
i = 1
for artwork in lt.iterator(sorted_artworks):
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artists_artworks = ', '.join(artists_artworks)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artists_artworks +',',
'Año:', artwork['Date'] + ',', 'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
input('Presione "Enter" para continuar.\n')
#Requirement 1
def printReq1Answer(SortedArtists,StartYear,EndYear):
if lt.size(SortedArtists) > 0:
print('Se encontró(aron)', str(lt.size(SortedArtists)), 'artista(s) entre el año',
str(StartYear), 'y', str(EndYear) + '.')
input('Presione "Enter" para continuar.')
if lt.size(SortedArtists) > 6:
print('Los primeros 3 y 3 últimos artistas encontrados fueron:\n')
i = 1
while i <= 3:
Artist = lt.getElement(SortedArtists,i)
print(str(i) + '. Nombre: ' + Artist['DisplayName'] +',', 'Año de nacimiento:', str(Artist['BeginDate']) + ',',
'Nacionalidad:', Artist['Nationality'] + ',', 'Género:', Artist['Gender'] + '.')
i += 1
print('...')
i = lt.size(SortedArtists)-2
while i <= lt.size(SortedArtists):
Artist = lt.getElement(SortedArtists,i)
print(str(i) + '. Nombre: ' + Artist['DisplayName'] +',', 'Año de nacimiento:', str(Artist['BeginDate']) + ',',
'Nacionalidad:', Artist['Nationality'] + ',', 'Género:', Artist['Gender'] + '.')
i += 1
else:
print('El(los) artista(s) encontrado(s) fue(ron):\n')
i = 1
while i <= lt.size(SortedArtists):
Artist = lt.getElement(SortedArtists,i)
print(str(i) + '. Nombre: ' + Artist['DisplayName'] +',', 'Año de nacimiento:', str(Artist['BeginDate']) + ',',
'Nacionalidad:', Artist['Nationality'] + ',', 'Género:', Artist['Gender'] + '.')
i += 1
else:
print('No se encontró ningún artista para el rango de años dado.')
input('Presione "Enter" para continuar.\n')
#Requirement 2
def printReq2Answer(SortedArtworks,StartYear,EndYear,artists):
if lt.size(SortedArtworks) > 0:
print('Se encontró(aron)', str(lt.size(SortedArtworks)), 'obra(s) entre la fecha',
str(StartYear), 'y', str(EndYear) + '.')
input('Presione "Enter" para continuar.')
if lt.size(SortedArtworks) > 6:
print('Las primeras 3 y 3 últimas obras encontradas fueron:\n')
i = 1
while i <= 3:
artwork = lt.getElement(SortedArtworks,i)
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artists_artworks = ', '.join(artists_artworks)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artists_artworks +',',
'Fecha:', artwork['DateAcquired'] + ',', 'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
print('...')
i = lt.size(SortedArtworks)-2
while i <= lt.size(SortedArtworks):
artwork = lt.getElement(SortedArtworks,i)
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artists_artworks = ', '.join(artists_artworks)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artists_artworks +',',
'Fecha:', artwork['DateAcquired'] + ',', 'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
else:
print('La(s) obra(s) encontrada(s) fue(ron):\n')
i = 1
while i <= lt.size(SortedArtworks):
artwork = lt.getElement(SortedArtworks,i)
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artists_artworks = ', '.join(artists_artworks)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artists_artworks +',',
'Fecha:', artwork['DateAcquired'] + ',', 'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
else:
print('No se encontró ninguna obra para el rango de fechas dado.')
input('Presione "Enter" para continuar.\n')
#Requirement 3
def printReq3Answer(artist, artist_info):
artist_artworks,artist_mediums,mostUsedMedium,mediumArtworks = artist_info
print('\nEl número de obras creadas por ' + artist + ' es ' + str(artist_artworks) + '.')
print('\nEl número de medios usados por ' + artist + ' en sus obras es ' + str(artist_mediums) + '.')
print('\nEl medio más usado por ' + artist + ' en sus obras es ' + str(mostUsedMedium) + '.')
input('Presione "Enter" para continuar.')
print('\nLas obras creadas con el medio más usado son: ')
if lt.size(mediumArtworks) > 6:
print('Las primeras 3 y 3 últimas obras encontradas fueron:\n')
i = 1
while i <= 3:
artwork = lt.getElement(mediumArtworks,i)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Fecha:', artwork['DateAcquired'] + ',',
'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
print('...')
i = lt.size(mediumArtworks)-2
while i <= lt.size(mediumArtworks):
artwork = lt.getElement(mediumArtworks,i)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Fecha:', artwork['DateAcquired'] + ',',
'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
else:
print('La(s) obra(s) encontrada(s) fue(ron):\n')
i = 1
while i <= lt.size(mediumArtworks):
artwork = lt.getElement(mediumArtworks,i)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Fecha:', artwork['DateAcquired'] + ',',
'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
#Requirement 4
def printReq4Answer(art_nation,artworks_nation,sorted_nations,artists):
top10 = lt.subList(sorted_nations,1,10)
print('Nación',' '*10, 'Número de Obras')
for nation in lt.iterator(top10):
print(nation['Nation'],' '*(16-len(nation['Nation'])), nation['NumbArtworks'])
input('Presione "Enter" para continuar.')
print('\nLa información de las 3 primeras y últimas obras de',art_nation,'es la siguiente:\n')
i = 1
first_nation = lt.subList(artworks_nation,1,3)
for artwork in lt.iterator(first_nation):
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artists_artworks = ', '.join(artists_artworks)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artists_artworks +',','Fecha:', artwork['DateAcquired'] + ',',
'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
print('...')
i = lt.size(artworks_nation)-2
last_nation = lt.subList(artworks_nation,lt.size(artworks_nation)-2,3)
for artwork in lt.iterator(last_nation):
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artists_artworks = ', '.join(artists_artworks)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artists_artworks +',','Fecha:', artwork['DateAcquired'] + ',',
'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + '.')
i += 1
#Requirement 5
def printReq5Answer(moveDepartmentAns, department, artists,artworks_date,artworks_price):
est_price, art2trans, est_weight, price_map, date_map = moveDepartmentAns
print('\nSe realizó la estimación del cálculo de costos para mover las obras del departamento ' + department + '.')
print('\nEl total de obras a trasnportar es de ' + str(art2trans) + '.')
print('\nEl peso estimado de las obras transportadas es ' + str(round(est_weight,2)) + ' kg.')
print('\nEl precio estimado del servicio es de USD $' + str(round(est_price,2)) + '.')
input('Presione "Enter" para continuar.')
print('\nLas 5 obras más antiguas encontradas son: ')
i = 1
while i <= 5:
artwork = lt.getElement(artworks_date,i)
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artist_name = ', '.join(artists_artworks )
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artist_name +',','Fecha:', artwork['DateAcquired'] + ',',
'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + ',', 'Costo:', str(round(artwork['EstPrice'],2)) + '.')
i += 1
input('Presione "Enter" para continuar.')
print('\nLas 5 obras más costosas encontradas son: ')
i = 1
while i <= 5:
artwork = lt.getElement(artworks_price,i)
artist_IDs = artwork['ConstituentID']
artists_artworks = controller.findArtist(artists,artist_IDs)
artists_artworks = ', '.join(artists_artworks)
print(str(i) + '. Título: ' + artwork['Title'] +',', 'Artista(s): ' + artists_artworks +',','Fecha:', artwork['DateAcquired'] + ',',
'Medio:', artwork['Medium'] + ',', 'Dimensiones:', artwork['Dimensions'] + ',', 'Costo:', str(round(artwork['EstPrice'],2)) + '.')
i += 1
#Requirement 7
def printReq7Answer(n_artworks,nationality):
print('\nEl número de obras de arte encontradas para la nacionalidad', nationality, 'es de', str(n_artworks),'obras.')
"""
Menu principal
"""
catalog = None
Artists = None
Artworks = None
list_type = None
while True:
printMenu()
inputs = input('Seleccione una opción para continuar\n')
if int(inputs[0]) == 0:
listaValida = False
while not listaValida:
list_type = int(input("Seleccione el tipo de representación de lista\n (1.) ARRAY_LIST (2.) LINKED_LIST: "))
if(list_type != 1 and list_type != 2):
print("Por favor ingrese una opción válida")
else:
listaValida = True
print("Cargando información de los archivos ....")
start_time = controller.start_endPerfTest()
catalog = controller.initCatalog(list_type)
controller.loadArtists(catalog)
controller.loadArtworks(catalog,list_type)
stop_time = controller.start_endPerfTest()
total_time = (stop_time - start_time)*1000
Artists = catalog['artists']
Artworks = catalog['artworks']
print('Total de artistas cargados: ' + str(lt.size(Artists)))
print('Total de obras cargadas: ' + str(lt.size(Artworks)))
input('Presione "Enter" para continuar.')
print('\nInformación de últimos artistas de la lista:\n')
printLastArtists(Artists)
input('Presione "Enter" para continuar.')
print('\nInformación de últimas obras de la lista:\n')
printLastArtworks(Artworks)
input('Presione "Enter" para continuar.\n')
print('\nEl tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
elif catalog == None:
print('Debe cargar los datos antes de seleccionar cualquier opción.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 1:
valid_medium = False
while not(valid_medium):
medium = input('Brinde el medio para el cual desea realizar el análisis: ')
if controller.encounterMedium(catalog,medium):
valid_medium = True
else:
print('Debe seleccionar un medio válido.')
input('Presione "Enter" para continuar.\n')
n = int(input('Establezca el número de obras: '))
sort_type = 5
n_artworks = controller.oldestArtworks(catalog,medium,sort_type,list_type)
start_time = controller.start_endPerfTest()
printReq0Answer(n_artworks,Artists,n)
stop_time = controller.start_endPerfTest()
total_time = (stop_time - start_time)*1000
print('\nEl tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 2:
valid_map = False
while not valid_map:
print("--Métodos de colisión")
print("1) Separate Chaining")
print("2) Linear Probing")
map_type = input("Seleccione el tipo de método de colisión a usar para el mapa: ")
valid_types = ["1","2"]
if map_type not in valid_types:
print("\nDebe seleccionar una opción válida.")
input('Presione "Enter" para continuar.\n')
else:
map_type = int(map_type)
valid_map = True
StartYear = int(input('Brinde el año inicial del rango: '))
EndYear = int(input('Brinde el año final del rango: '))
start_time = controller.start_endPerfTest()
artistsInRange = controller.ArtistsInRange(Artists,StartYear,EndYear,list_type,map_type)
SortedArtists = controller.SortChronologically(artistsInRange,StartYear,EndYear,list_type)
stop_time = controller.start_endPerfTest()
printReq1Answer(SortedArtists,StartYear,EndYear)
total_time = (stop_time - start_time)*1000
print('El tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 3:
valid_map = False
while not valid_map:
print("--Métodos de colisión")
print("1) Separate Chaining")
print("2) Linear Probing")
map_type = input("Seleccione el tipo de método de colisión a usar para el mapa: ")
valid_types = ["1","2"]
if map_type not in valid_types:
print("\nDebe seleccionar una opción válida.")
input('Presione "Enter" para continuar.\n')
else:
map_type = int(map_type)
valid_map = True
sortValido = False
while not sortValido:
sort_type = int(input("Seleccione el tipo de sort\n (1.) QuickSort (2.) Insert (3.) Shell (4.) Selection (5.) Merge: "))
if(sort_type != 1 and sort_type != 2 and sort_type != 3 and sort_type != 4 and sort_type != 5):
print("Por favor ingrese una opción válida\n")
else:
sortValido = True
StartYear = input('Brinde la fecha inicial del rango: ')
EndYear = input('Brinde la fecha final del rango: ')
start_time = controller.start_endPerfTest()
artworksInRange = controller.ArtworksInRange(Artworks,StartYear,EndYear,list_type,valid_map)
sorted_artworks = controller.SortArtworks(artworksInRange,sort_type,list_type)
stop_time = controller.start_endPerfTest()
printReq2Answer(sorted_artworks,StartYear,EndYear,Artists)
total_time = (stop_time - start_time)*1000
print('El tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 4:
valid_map = False
while not valid_map:
print("--Métodos de colisión")
print("1) Separate Chaining")
print("2) Linear Probing")
map_type = input("Seleccione el tipo de método de colisión a usar para el mapa: ")
valid_types = ["1","2"]
if map_type not in valid_types:
print("\nDebe seleccionar una opción válida.")
input('Presione "Enter" para continuar.\n')
else:
map_type = int(map_type)
valid_map = True
artist_name = input('Brinde el nombre del artista del cual desea obtener información: ')
artist_ID = controller.encounterArtist(Artists,artist_name)
if artist_ID == 'NotFound':
'No se ha encontrado el artista escogido.'
else:
start_time = controller.start_endPerfTest()
artist_info = controller.artistMediumInfo(Artworks,artist_ID,list_type,map_type)
stop_time = controller.start_endPerfTest()
printReq3Answer(artist_name,artist_info)
input('Presione "Enter" para continuar.\n')
total_time = (stop_time - start_time)*1000
print('El tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 5:
print('\nSe organizarán las obras por nacionalidad.')
valid_map = False
while not valid_map:
print("--Métodos de colisión")
print("1) Separate Chaining")
print("2) Linear Probing")
map_type = input("Seleccione el tipo de método de colisión a usar para el mapa: ")
valid_types = ["1","2"]
if map_type not in valid_types:
print("\nDebe seleccionar una opción válida.")
input('Presione "Enter" para continuar.\n')
else:
map_type = int(map_type)
valid_map = True
start_time = controller.start_endPerfTest()
artworksNationality,nations = controller.nationalityArtworks(Artworks,catalog,list_type,map_type)
sort_type = 5
sorted_nations,art_nation,artworks_nation = controller.sortNations(artworksNationality,nations,sort_type)
stop_time = controller.start_endPerfTest()
printReq4Answer(art_nation,artworks_nation,sorted_nations,Artists)
input('Presione "Enter" para continuar.\n')
total_time = (stop_time - start_time)*1000
print('El tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 6:
valid_map = False
while not valid_map:
print("--Métodos de colisión")
print("1) Separate Chaining")
print("2) Linear Probing")
map_type = input("Seleccione el tipo de método de colisión a usar para el mapa: ")
valid_types = ["1","2"]
if map_type not in valid_types:
print("\nDebe seleccionar una opción válida.")
input('Presione "Enter" para continuar.\n')
else:
map_type = int(map_type)
valid_map = True
sortValido = False
while not sortValido:
sort_type = int(input("Seleccione el tipo de sort\n (1.) QuickSort (2.) Insert (3.) Shell (4.) Selection (5.) Merge: "))
if(sort_type != 1 and sort_type != 2 and sort_type != 3 and sort_type != 4 and sort_type != 5):
print("Por favor ingrese una opción válida\n")
else:
sortValido = True
department = input('Brinde el nombre del departamento para el cual desea calcular el costo: ')
if controller.checkDepartment(Artworks,department):
start_time = controller.start_endPerfTest()
moveDepartmentAns = controller.moveDepartment(Artworks,department,map_type)
est_price, art2trans, est_weight, price_map, date_map = moveDepartmentAns
artworks_date = controller.SortArtworksByDate(date_map,sort_type,list_type)
artworks_price = controller.SortArtworksByPrice(price_map,sort_type,list_type)
stop_time = controller.start_endPerfTest()
printReq5Answer(moveDepartmentAns,department,Artists,artworks_date,artworks_price)
input('Presione "Enter" para continuar.\n')
total_time = (stop_time - start_time)*1000
print('El tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
else:
print('Debe seleccionar un departamento válido.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 7:
valid_nationality = False
while not(valid_nationality):
nationality = input('Brinde la nacionalidad para la cual desea conocer el número de obras: ')
if controller.encounterNationality(catalog,nationality):
valid_nationality= True
else:
print('Debe seleccionar una nacionalidad válida.')
input('Presione "Enter" para continuar.\n')
n_artworks = controller.countArtworksNationality(catalog,nationality)
start_time = controller.start_endPerfTest()
printReq7Answer(n_artworks,nationality)
stop_time = controller.start_endPerfTest()
total_time = (stop_time - start_time)*1000
print('\nEl tiempo usado para llevar a cabo el algoritmo es de ' + str(total_time) + ' mseg.')
input('Presione "Enter" para continuar.\n')
elif int(inputs[0]) == 8:
sys.exit(0)
else:
print('Debe seleccionar una opción válida')
input('Presione "Enter" para continuar.\n')
sys.exit(0) |
987,933 | e955297bd782fbbc01fcf275606cab0478adccb2 | """
Autor: GAÑAN, Tomas // CERIONI, Enrique
Ejercicio 2: Moneda Falsa
"""
# Importacion de librerias/modulos
import numpy as np
import random
# Eligir una moneda aleatoriamente / identificar cuál es la falsa
moneda = [0,0,0,0,0,0,0,0,0,0,0,0]
num = random.randint(0,11)
moneda[num]=1
true_coin = [0,0,0]
print("\nLAS 12 MONEDAS = ")
print(moneda)
brazo_der = [moneda[0], moneda[1], moneda[2], moneda[3]]
brazo_izq = [moneda[4], moneda[5], moneda[6], moneda[7]]
mesa = [moneda[8], moneda[9], moneda[10], moneda[11]]
print("\nBRAZO DERECHO = ")
print(brazo_der)
print("\nBRAZO IZQUIERDO =")
print(brazo_izq)
print("\nMESA = ")
print(mesa)
# PRIMER PESADA
if brazo_der == brazo_izq:
print("\nBALANZA EQUILIBRADA.")
else:
print("\nLA MONEDA SE ENCUENTRA EN UNO DE LOS BRAZOS.")
brazo_der2 = [moneda[0], moneda[9], moneda[10], moneda[11]]
brazo_izq2 = [moneda[4],moneda[1], moneda[2], moneda[3]]
mesa2 = [moneda[8], moneda[5], moneda[6], moneda[7]]
print("\nBRAZO DERECHO = ")
print(brazo_der)
print("\nBRAZO IZQUIERDO =")
print(brazo_izq)
print("\nMESA = ")
print(mesa)
# SEGUNDA PESADA
if brazo_der2 == brazo_izq2:
print("\nBALANZA EQUILIBRADA.")
else:
print("\nLA MONEDA SE ENCUENTRA EN UNO DE LOS BRAZOS.")
# ÚLTIMAS 3 MONEDAS DE CADA GRUPO
brazo_der3 = [moneda[9], moneda[10], moneda[11]]
brazo_izq3 = [moneda[1], moneda[2], moneda[3]]
mesa3 = [moneda[5], moneda[6], moneda[7]]
if brazo_der3 == brazo_izq3 and brazo_der3 == mesa3:
print("\nLA MONEDA SE ENCUENTRA EN UNA DE LAS 3 QUE NO CAMBIAMOS (INICIALES)")
# TERCER PESADA
if moneda[0] == moneda[4]:
print("LA MONEDA -> 9 ES FALSA")
elif moneda[0] == 1:
print("LA MONEDA -> 1 ES FALSA")
else:
print("LA MONEDA -> 5 ES FALSA")
else:
if brazo_der3 == true_coin and brazo_izq3 == true_coin:
print("\nLA MONEDA SE ENCUENTRA EN EL BRAZO IZQUIERDO.")
# TERCER PESADA
if moneda[5] == moneda[6]:
print("LA MONEDA -> 8 ES FALSA")
elif moneda[5] == 1:
print("LA MONEDA -> 6 ES FALSA")
else:
print("LA MONEDA -> 7 ES FALSA")
elif brazo_der3 == true_coin and mesa3 == true_coin:
print("\nLA MONEDA SE ENCUENTRA EN EL BRAZO DERECHO.")
# TERCER PESADA
if moneda[1] == moneda[2]:
print("LA MONEDA -> 4 ES FALSA")
elif moneda[1] == 1:
print("LA MONEDA -> 2 ES FALSA")
else:
print("LA MONEDA -> 3 ES FALSA")
else:
print("\nLA MONEDA SE ENCUENTRA EN EL BRAZO DERECHO.")
# TERCER PESADA
if moneda[9] == moneda[10]:
print("LA MONEDA -> 12 ES FALSA")
elif moneda[9] == 1:
print("LA MONEDA -> 10 ES FALSA")
else:
print("LA MONEDA -> 11 ES FALSA") |
987,934 | 0eb1dee9dee337b2e985aa81c14b7e01600a9127 | import scipy.stats
from scipy.special import hermite
from scipy.linalg import eigh
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.path as path
from hermite_poly import Hermite, Poly
from simple_models import simulate, VAC, well_well, makegrid, fcn_weighting, L2subspaceProj_d, OU, dot
from mpl_toolkits import mplot3d
from basis_sets import indicator
from numpy import exp,arange
from pylab import meshgrid,cm,imshow,contour,clabel,colorbar,axis,title,show
import tables as tb
'''
finding the true eigenvalues
'''
print("Finding true weightings.")
fineness = 4
endpoint = 1
dimension = 1
basis_true = [Hermite(0).to_fcn()]
basis_true = basis_true + [Hermite(n, d).to_fcn() for n in range(1,fineness) for d in range(dimension)]
truebasisSize = len(basis_true)
delta_t = .001
T = 1000
n = 1000
length = round(T / delta_t)
optimal_timeLag = .3
h5 = tb.open_file("Trajectory_Data/DW_1D_delta_t={},T={},n={}.h5".format(delta_t, T, n), 'r')
a = h5.root.data
t = np.array(a[0:80,round(length * .05):])
w_f = VAC(basis_true, t, optimal_timeLag, delta_t, dimension = dimension, update = True).find_eigen(truebasisSize)[1].T
distribution = np.hstack([a[d:d+dimension, round(length *.05):] for d in range(500,530, dimension)])
h5.close()
'''
-----------------------------------------
'''
print("Done with finding true weightings.")
fineness = 6
endpoint = 1.8
basis = [Hermite(n).to_fcn() for n in range(fineness)]
basis = [indicator(fineness, endpoint, center = i).to_fcn() for i in makegrid(endpoint, dimension = dimension, n = fineness)]
basisSize = len(basis)
delta_t = .001
T = 1000
n = 1000
length = round(T / delta_t)
h5 = tb.open_file("Trajectory_Data/DW_1D_delta_t={},T={},n={}.h5".format(delta_t, T, n), 'r')
a = h5.root.data
t = np.array(a[100:104,round(length * .05):round(length * 1)])
h5.close()
time_lag = np.hstack((np.linspace(delta_t, 1, 10)))
print("Now getting eigenvalues.")
evs = [VAC(basis, t, l, delta_t, dimension = dimension, update = True).find_eigen(basisSize) for l in time_lag]
print("Calculating Phi's")
"Number of eigenfunctions to compare. Must be less than basisSize."
m = 3
Phi_g = np.array([f(distribution) for f in basis])
Phi_f = np.array([f(distribution) for f in basis_true])
print("Now calculating error.")
eigen_dist = [ev[0][basisSize - m] - ev[0][basisSize - m - 1] for ev in evs]
error = [L2subspaceProj_d(w_f = w_f[truebasisSize - m:], w_g = ev[1].T[basisSize - m:][::-1],
distribution = distribution, Phi_f = Phi_f, Phi_g = Phi_g)
for ev in evs]
print("Now plotting some graphs.")
plt.plot(eigen_dist, error)
plt.xlabel("Distance to nearest eigenvalue")
plt.ylabel("Error in estimated subspaces")
plt.title("Error in estimation with varying time lags (DW, 1D)")
print([time_lag[i] for i in range(len(eigen_dist)) if eigen_dist[i] == max(eigen_dist)])
ev = [[ev[0][i] for ev in evs] for i in range(m-1)]
[plt.plot(time_lag, ev[i]) for i in range(m-1)]
plt.xlabel("Time Lag")
plt.ylabel("Eigenvalues")
plt.title("Eigenvalues vs. Time Lag (OU, 1-D)")
"""
The third eigenvalue is well approximated until around .41 seconds of time lag,
then the approximation gets dramatically worth.
"""
"""
CODE FOR PLOTTING BELOW
"""
ev = evs[0]
estimated = [fcn_weighting(basis, v) for v in ev[1].T][::-1]
true = [fcn_weighting(basis_true, v) for v in w_f][::-1]
if dimension == 1:
z = np.linspace(-1.5,1.5,20)
w = [h(z) for h in estimated]
y = [h(z) for h in true]
#
plt.plot(z,w[0], "-r", label = "First")
plt.plot(z,w[1], "-b", label = "Second")
plt.plot(z,w[2], "-g", label = "Third")
# plt.plot(z[0],w[3], "-g", label = "Third")
# #
plt.plot(z,y[0], "-r", label = "First")
plt.plot(z,y[1], "-b", label = "Second")
plt.plot(z,y[2], "-g", label = "Third")
# # plt.plot(z[0],y[3], "-g", label = "Fourth")
#
# plt.legend()
# plt.show()
if dimension == 2:
d1 , d2 = [np.linspace(-1.8, 1.8, 10), np.linspace(-1.8, 1.8, 10)]
y, x = np.meshgrid(d1, d2)
w = [np.array([[h(np.vstack([a,b])) for a in d1] for b in d2]) for h in estimated]
# v = [np.array([[h(np.vstack([a,b])) for a in d1] for b in d2]) for h in true]
# x and y are bounds, so z should be the value *inside* those bounds.
# Therefore, remove the last value from the z array.
z = w[7]
z = z[:-1, :-1]
z_min, z_max = -np.abs(z).max(), np.abs(z).max()
fig, ax = plt.subplots()
c = ax.pcolormesh(x, y, z, cmap='RdBu', vmin=z_min, vmax=z_max)
ax.set_title('pcolormesh')
# set the limits of the plot to the limits of the data
ax.axis([x.min(), x.max(), y.min(), y.max()])
fig.colorbar(c, ax=ax)
plt.show()
|
987,935 | ee2b1d9b8f190b2faba4237f266a4741262d5869 | #Alina Omorbekova
#В магазине есть список разных продуктов, у каждого продукта есть
# название, цена, уникальный номер. Сперва пользователю нужно отобразить
# весь список продуктов с их информацией, после нужно сказать чтобы он
# ввел название товара, если такой товар есть предложить пользователю
# купить этот товар, и ввести сумму если введенная сумма меньше цены
# которая указана на товар то нужно уведомить его что у вас не хватает
# денег чтобы купить, иначе сказать ему что вы получили товар.
def grocery_store(list_of_groceries, list_of_prices):
print('Список продуктов:', list_of_groceries)
print('Цены продуктов: ', list_of_prices)
inpt = input('Введите название товара: ')
for i in list_of_groceries:
if inpt in list_of_groceries:
indx = list_of_groceries.index(inpt)
print('Хотите совершить покупку?')
money = int(input('Введите сумму: '))
cost_of_apple = int(list_of_prices[indx])
if money >= int(cost_of_apple):
print('Вы успешно совершили покупку!')
break
else:
print('У вас не хватает средств!')
list_of_groceries = ('apples', 'bread', 'ramen', 'strawberries')
list_of_prices = (150, 80, 240, 450)
grocery_store(list_of_groceries, list_of_prices)
|
987,936 | 960de71e49255acbeb95764a31503d27dafafa83 | import logging
import requests
import xmltodict
try:
from urllib.parse import urljoin
except ImportError:
from urlparse import urljoin
LOGGER = logging.getLogger('cisco_olt_http.client')
class Client(object):
def __init__(self, base_url):
'''
:param base_url: OLT box API base url.
'''
self.base_url = base_url
self.session = requests.Session()
# token is incremented before each operation
self._token = -1
@property
def token(self):
'''Operation token which is incremented before each use'''
self._token += 1
return self._token
def login(self, username, password):
'''
Initiate authenticated session with given credentials
:param usernam: Username
:param password: Password
:returns: Login request's response
'''
login_data = {
'myusername': username,
'mypassword': password,
'button': 'Login', 'textfield': 'UX_EQUIPNAME',
}
response = self._req('login.htm', data=login_data)
return response
def execute(self, op, **kwargs):
'''
Execute API request operation with given operation ``data``.
:param op: Operation class
:type op: class (type)
:param data: Operation related data passed
:type data: dict or None
:returns: OperationResult
'''
return op(self).execute(**kwargs)
def _req(self, url, method='POST', **options):
url = urljoin(self.base_url, url)
LOGGER.debug('Request to: %s with options: %s', url, options)
response = self.session.request(method, url, **options)
response.raise_for_status()
LOGGER.debug(
'Response status: %s content: %s',
response.status_code, response.content)
return response
|
987,937 | aca47ed5259a4a36a4b0fc8cf0b62ccf9c0086a7 | import numpy as np
import pandas as pd
#import matplotlib.pyplot as plt
#import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.metrics import r2_score
from sklearn.metrics import mean_squared_error
from analysis_util import *
import networkx as nx
k = 7 # k best candidates
#src_graph = 'soc-karate/soc-karate.txt'
#src_graph = 'soc-dolphins/soc-dolphins.txt'
#src_graph = 'rt-retweet/rt-retweet.txt'
#src_graph = 'soc-firm-hi-tech/soc-firm-hi-tech.txt'
#src_graph = 'socfb-Reed98/socfb-Reed98.txt'
#src_graph = 'socfb-Caltech36/socfb-Caltech36.txt'
#src_graph = 'socfb-Simmons81/socfb-Simmons81.txt'
src_graph = 'soc-wiki-vote/soc-wiki-vote.txt'
#src_graph = 'rt-twitter-copen/rt-twitter-copen.txt'
#src_graph = 'socfb-Haverford76/socfb-Haverford76.txt'
#infile = 'soc-karate/soc-karate_all_exactHawkes_labeled.txt'
#infile = 'soc-dolphins/soc-dolphins_all_exactHawkes_labeled.txt'
infile = 'soc-wiki-vote/soc-wiki-vote_all_exactHawkes_labeled.txt'
#infile = 'soc-firm-hi-tech/soc-firm-hi-tech_all_exactHawkes_labeled.txt'
#infile = 'rt-retweet/rt-retweet_all_exactHawkes_labeled.txt'
#infile = 'rt-twitter-copen/rt-twitter-copen_all_exactHawkes_labeled.txt'
#infile = 'socfb-Reed98/socfb-Reed98_all_exactHawkes_labeled.txt'
#infile = 'socfb-Caltech36/socfb-Caltech36_all_exactHawkes_labeled.txt'
#infile = 'socfb-Simmons81/socfb-Simmons81_all_exactHawkes_labeled.txt'
#infile = 'socfb-Haverford76/socfb-Haverford76_all_exactHawkes_labeled.txt'
#parafile = 'socfb-Reed98/socfb-Reed98_linear_coef.txt'
#parafile = 'socfb-Caltech36/socfb-Caltech36_linear_coef.txt'
#parafile = 'socfb-Simmons81/socfb-Simmons81_linear_coef.txt'
parafile = 'soc-wiki-vote/soc-wiki-vote_linear_coef.txt'
#parafile = 'rt-twitter-copen/rt-twitter-copen_linear_coef.txt'
#parafile = 'socfb-Haverford76/socfb-Haverford76_linear_coef.txt'
df = pd.read_csv(infile, header=None, sep=' ')
X = df.iloc[:,0:46].values
y = df[46].values
l = [e for e in X.flatten() if e == 0.]
num_zeros = len(l) / len(X.flatten())
print("num of zeros in X: %.3f\n" % num_zeros)
df_x = pd.DataFrame(X)
df_x = df_x.apply(log_freq_count, axis=1)
X = df_x.values
y = np.log(y)
#sns.distplot(y)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, shuffle=False, random_state=123)
lr = LinearRegression()
lr.fit(X_train, y_train)
y_pred = lr.predict(X_test)
# write coef
with open(parafile, 'w') as file:
file.write('%s\n' % (lr.intercept_))
for s in lr.coef_:
file.write('%s\n' % (s))
r2 = r2_score(y_test, y_pred)
mse = mean_squared_error(y_test, y_pred)
print("r2 = %f" % r2)
print("mse = %f" % mse)
#plt.scatter(list(range(0, len(y))), y)
#plt.scatter(list(range(0, len(X_test))), y_pred, label='pred')
#plt.scatter(list(range(0, len(X_test))), y_test, label='true')
#plt.legend(loc=0)
#plt.scatter(y_test, y_pred, alpha=0.5)
#plt.xticks(np.arange(0,3))
#plt.yticks(np.arange(0,3))
#plt.xlabel('exact hawkes event counts')
#plt.ylabel('predict event counts')
#plt.plot(np.arange(0, 3,0.1), np.arange(0,3,0.1), 'r')
#plt.show()
"""
G = nx.read_edgelist(src_graph)
n = len(list(G.nodes))
df1 = df.iloc[0:n, :]
knode_predict, knode_true, score_predict, score_true = find_k_best(k, lr, df1)
print('\n')
print('----- Predict -----')
print(knode_predict)
print(score_predict)
print('----- Hawkes ------')
print(knode_true)
print(score_true)
val_map1 = dict(zip(knode_predict, np.repeat(1, k)))
val_map2 = dict(zip(knode_true, np.repeat(1, k)))
values1 = [val_map1.get(np.int(node), 0.25) for node in G.nodes()]
values2 = [val_map2.get(np.int(node), 0.25) for node in G.nodes()]
p = []
for i in range(0, n):
p.append(tuple(np.random.rand(2)))
pos = dict(zip(G.nodes, p))
plt.figure(1)
plt.title('Predict')
nx.draw(G, pos=pos, cmap=plt.get_cmap('rainbow'), node_color=values1, alpha=0.5, with_labels=True)
plt.figure(2)
plt.title('Hawkes')
nx.draw(G, pos=pos, cmap=plt.get_cmap('rainbow'), node_color=values2, alpha=0.5, with_labels=True)
plt.show()
"""
|
987,938 | fcfc71eac1cc42b582305e7e8db9222b8bc88d09 | import sys
def calculate_growth(capital, growth, years):
    """Return *capital* compounded by factor *growth* for *years* periods."""
    return capital * growth ** years
def calculate_growth_on_consist_invest(capital, investment, growth, years):
    """Compound *capital* while adding *investment* every period.

    The starting capital compounds for the full horizon; each periodic
    investment compounds only for the periods remaining after it is made
    (years-1 down to 1).  Returns (accumulated_capital, original_capital).
    """
    accumulated_capital = capital * growth ** years
    original_capital = capital
    for remaining in range(years - 1, 0, -1):
        grown_investment = investment * growth ** remaining
        accumulated_capital += grown_investment
        original_capital += investment
        print(f"{investment} grew to {grown_investment} in {remaining} time units")
    return accumulated_capital, original_capital
def main():
    """CLI entry point: read capital, investment, growth factor and years
    from argv, then report original/accumulated capital and the ratio."""
    capital = float(sys.argv[1])
    investment = float(sys.argv[2])
    growth = float(sys.argv[3])
    years = int(sys.argv[4])
    accumulated, original = calculate_growth_on_consist_invest(
        capital, investment, growth, years
    )
    for label, value in (
        ("original_capital", original),
        ("accumulated_capital", accumulated),
        ("capital_growth", accumulated / original),
    ):
        print(f"{label}: {value}")
if __name__ == '__main__':
main()
|
987,939 | 86d4c3ba585ceeeea124a1970dc53e34861a3d28 | import os
import dnacauldron as dc
# Assemble the RFP/GFP plasmids with DNA Cauldron: load the part and
# backbone sequences, simulate the CSV-described assembly plan, and write
# a full report to output/.
repo = dc.SequenceRepository()
files = ["RFP_GFP_plasmid_parts.fa", "RFP_GFP_plasmid_BOs.fa"]
repo.import_records(files=files)
plan = dc.AssemblyPlan.from_spreadsheet(path="assembly_plan.csv")
simulation = plan.simulate(repo)
stats = simulation.compute_stats()  # summary statistics (currently unused)
simulation.write_report("output/")
print ("Done! see output/ folder for the results.")
987,940 | e9f022cd03dcb110d14d60c64f746f2800d2be20 | import random
import numpy as np
import torchvision.transforms as tt
from collections import deque
import PIL.Image
import cv2
class Seed():
    """Rolling source of RNG seeds shared between paired augmentations.

    'train' draws a fresh seed, 'binary' replays the queued seeds for the
    matching mask, and 'preview' walks backwards through the snapshot of
    the last drawn batch.
    """

    def __init__(self):
        self.seeds = deque([])
        self.pointer = -1
        self.new_seed_flag = True

    def new_seed(self):
        """Draw a new random seed; restart the queue after a replay pass."""
        if not self.new_seed_flag:
            # the previous batch was consumed via pop_seed(); start fresh
            self.seeds = deque([])
            self.pointer = -1
        self.seeds.append(random.random())
        self.pointer += 1
        self.new_seed_flag = True
        self.saved_seeds = self.seeds.copy()  # snapshot used by 'preview'
        return self.seeds[-1]

    def pop_seed(self):
        """Consume the oldest queued seed (binary-mask replay path)."""
        self.new_seed_flag = False
        return self.seeds.popleft()

    def get(self, mode):
        """Return the next seed according to *mode*."""
        if mode == 'preview':
            self.pointer -= 1
            return self.saved_seeds[self.pointer]
        if mode == 'train':
            return self.new_seed()
        if mode == 'binary':
            return self.pop_seed()
class Fixed_seed():
    """
    Same seed for all image augmentation in same batch
    """

    def __init__(self):
        self.generate_seeds()

    def generate_seeds(self, n=10):
        """Draw *n* uniform seeds to be shared across one batch."""
        self.seeds = [random.random() for _ in range(n)]

    def get_seed(self, i):
        """Return the i-th fixed seed of the current batch."""
        return self.seeds[i]
class No_augmentation():
    """Pass-through augmentation: keeps the image and its size unchanged."""

    def __init__(self, image):
        # mirror the source image's dimensions; no crop is ever applied
        self.x, self.y = image.x, image.y

    def perform_augmentation(self, image, mode):
        """Return *image* untouched (mode is ignored)."""
        return image

    def get_crop_dim(self):
        """Report the (unchanged) output dimensions."""
        return self.x, self.y
class Image_augmentation():
    """Builds and applies the augmentation pipeline for one image.

    The pipeline currently consists of a seeded random Crop (the other
    steps are commented out); the chosen crop size is exposed through
    get_crop_dim() so callers can size their buffers.
    """
    def __init__(self, image):
        # the image object carries the shared seed machinery and its size
        self.seed = image.augmentation_seed
        self.x = image.x
        self.y = image.y
        self.fixed_seed = image.fixed_seed
        self.generate_augmentation(image)
    def generate_augmentation(self, image):
        """Assemble the list of augmentation steps into one callable."""
        self.augmentations = []
        # Cropping: size is fixed per batch (fixed seeds), position per call
        self.augmentations.append(Crop(image.x, image.y, fixed_seed1=self.fixed_seed.get_seed(0),
                                       fixed_seed2=self.fixed_seed.get_seed(1), seed=self.seed))
        self.crop_x = self.augmentations[-1].crop_size_x
        self.crop_y = self.augmentations[-1].crop_size_y
        # Flipping (disabled)
        #self.augmentations.append(Vertical_flip(seed=self.seed, prob=0.5))
        #self.augmentations.append(Horisontal_flip(seed=self.seed, prob=0.5))
        # Changing brightness (disabled)
        #self.augmentations.append(Adjust_Brigthness(seed=self.seed))
        # Changing saturation (disabled)
        #self.augmentations.append(Adjust_Saturation(seed=self.seed))
        # Combining set augmentations
        self.augmentations = Combined_augmentations(self.augmentations)
    def perform_augmentation(self, image, mode):
        """Run the pipeline; if *mode* is None, infer it from the image."""
        self.mode = mode
        if self.mode == None:
            self.get_mode(image)
        return self.augmentations(image, self.mode)
    def get_mode(self, image, preview=False):
        """
        Returns either 'train' or 'test' depending on the image comming in.
        3 channel images are train images, thus 'train' is returned.
        1 channel images are binary image, thus 'test' is returned.
        'preview' is returned if augmentation is for visualization purpose,
        thus neither creating nor removing seeds.
        """
        if preview:
            self.mode = 'preview'
        elif len(image.shape) == 3:
            self.mode = 'train'
        else:
            self.mode = 'binary'
    def get_crop_dim(self):
        """Return the (crop_x, crop_y) output size chosen for this batch."""
        return self.crop_x, self.crop_y
class Combined_augmentations(object):
    """Chains a list of augmentation steps; each step's output feeds the next."""

    def __init__(self, augmentations):
        self.augmentations = augmentations

    def __call__(self, image, mode):
        result = image
        for step in self.augmentations:
            result = step.perform(result, mode)
        return result
class No_action(object):
    """Augmentation step that leaves the image untouched (identity)."""
    def __init__(self):
        pass
    def perform(self, image, mode):
        # identity transform; *mode* is ignored
        return image
class Vertical_flip(object):
    """Seeded random vertical (top-bottom) flip of a 2-D/3-D image array."""

    def __init__(self, seed, prob=0.5):
        self.seed = seed  # seed provider exposing .get(mode)
        self.prob = prob  # the uniform draw must exceed this to flip

    def perform(self, image, mode):
        # seeding from the shared provider keeps image/mask flips in sync
        random.seed(self.seed.get(mode))
        do_flip = random.random() > self.prob
        return image[::-1, :] if do_flip else image
class Horisontal_flip(object):
    """Seeded random horizontal (left-right) flip of a 2-D/3-D image array."""

    def __init__(self, seed, prob=0.5):
        self.seed = seed  # seed provider exposing .get(mode)
        self.prob = prob  # the uniform draw must exceed this to flip

    def perform(self, image, mode):
        # seeding from the shared provider keeps image/mask flips in sync
        random.seed(self.seed.get(mode))
        do_flip = random.random() > self.prob
        return image[:, ::-1] if do_flip else image
class Adjust_Brigthness(object):
    # NOTE(review): class name keeps the original "Brigthness" misspelling
    # because external code may reference it by this exact name.
    def __init__(self, seed):
        self.seed = seed
    def perform(self, image, mode):
        """
        Changes brightness in image relative to each pixel +- 75% of distance to lowest or
        highest possible value.
        If current brightness is 200 [0; 255], then +-75% would be in range 200 +- (255-200)*value
        """
        random.seed(self.seed.get(mode))
        if mode == 'binary':
            return image
        # gets random number between -0.75 and 0.75 from normal distribution
        change_in_brightness = max(-1, min(1, random.normalvariate(0, 0.33)))
        # NOTE(review): the next line overrides the random draw with a fixed
        # value - this looks like a leftover debug/visualization setting;
        # confirm whether the constant or the random draw is intended.
        change_in_brightness = -0.6
        hsv_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_BGR2HSV)
        # per-pixel headroom up to the 255 ceiling of the V channel
        max_change_per_pixel = (np.full((len(image[:, 0, 0]), len(image[0, :, 0])), 255) - hsv_image[:, :, 2])
        min_change_per_pixel = hsv_image[:, :, 2]
        # scale by the smaller headroom so no pixel over/underflows
        change_per_pixel = np.minimum(min_change_per_pixel, max_change_per_pixel) * change_in_brightness
        hsv_image[:, :, 2] = hsv_image[:, :, 2] + change_per_pixel
        image = np.round(cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)).astype(np.uint8)
        return image
class Adjust_Saturation(object):
    def __init__(self, seed):
        self.seed = seed
    def perform(self, image, mode):
        """
        Changes saturation relative to each pixel's distance to the lowest or
        highest possible saturation value (S channel headroom), by at most
        +- 50% of that distance.
        """
        random.seed(self.seed.get(mode))
        if mode == 'binary':
            return image
        # gets random number between -0.5 and 0.5 from normal distribution
        change_in_saturation = max(-0.5, min(0.5, random.normalvariate(0, 0.167)))
        # NOTE(review): the next line overrides the random draw with a fixed
        # value - this looks like a leftover debug/visualization setting;
        # confirm whether the constant or the random draw is intended.
        change_in_saturation = 0.35
        hsv_image = cv2.cvtColor(image.astype(np.float32), cv2.COLOR_BGR2HSV)
        # per-pixel headroom up to the S channel ceiling of 1.0 (float HSV)
        max_change_per_pixel = (np.full((len(image[:, 0, 0]), len(image[0, :, 0])), 1) - hsv_image[:, :, 1])
        min_change_per_pixel = hsv_image[:, :, 1]
        # scale by the smaller headroom so no pixel over/underflows
        change_per_pixel = np.minimum(min_change_per_pixel, max_change_per_pixel) * change_in_saturation
        hsv_image[:, :, 1] = hsv_image[:, :, 1] + change_per_pixel
        image = np.round(cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR)).astype(np.uint8)
        return image
class Crop(object):
    """Seeded random crop.

    The crop *size* is chosen once per batch from the fixed seeds; the
    crop *position* is redrawn on every perform() call from the rolling
    seed provider, so image/mask pairs crop identically.
    """

    def __init__(self, old_size_x, old_size_y, fixed_seed1, fixed_seed2, seed):
        self.old_size_x = old_size_x
        self.old_size_y = old_size_y
        # candidate crop sizes; the multi-choice lists are immediately
        # narrowed to a single option, mirroring the original configuration
        self.size_list_x = [int(old_size_x / 4), int(old_size_x / 2), int(old_size_x * (3 / 4))]
        self.size_list_y = [int(old_size_y / 4), int(old_size_y / 2), int(old_size_y * (3 / 4))]
        self.size_list_x = [int(old_size_x * (3 / 4))]
        self.size_list_y = [int(old_size_y / 2)]
        random.seed(fixed_seed1)
        self.crop_size_x = random.choice(self.size_list_x)
        random.seed(fixed_seed2)
        self.crop_size_y = random.choice(self.size_list_y)
        self.seed = seed

    def perform(self, image, mode):
        """Crop *image* to (crop_size_x, crop_size_y) at a seeded position."""
        random.seed(self.seed.get(mode))
        self.i = random.randint(0, self.old_size_x - self.crop_size_x)
        # note: each coordinate consumes its own seed from the provider
        random.seed(self.seed.get(mode))
        self.j = random.randint(0, self.old_size_y - self.crop_size_y)
        rows = slice(self.i, self.i + self.crop_size_x)
        cols = slice(self.j, self.j + self.crop_size_y)
        return image[rows, cols]
987,941 | 4f693fdea3a9af3ca34bf4705922c04b42f11322 | class Packet:
def __init__(self, data):
self.event = data[0]
self.time = float(data[1])
self.from_node = data[2]
self.to_node = data[3]
self.pkt_type = data[4]
self.pkt_size = int(data[5])
self.flow_id = data[7]
self.source_addr = data[8]
self.dest_addr = data[9]
self.seq_number = data[10]
self.pkt_id = data[11]
# Walk an ns-2 trace and compute per-flow throughput for flows 2 and 3:
# count TCP segments whose ACK was received, timed from first enqueue to
# last ACK.
with open('../trace_files/reno_reno2/reno_reno_8.tr') as f:
    content = f.readlines()
pkts_rcvd1 = 0
pkts_rcvd2 = 0
start_time_1 = -1
start_time_2 = -1
end_time_1 = 0
end_time_2 = 0
set1 = set()  # outstanding (unacked) seq numbers, flow 2
set2 = set()  # outstanding (unacked) seq numbers, other flow (3)
for line in content:
    packet = Packet(line.split())
    # '+' enqueue of a TCP segment marks it as sent
    if packet.pkt_type == "tcp" and packet.event == "+":
        if packet.flow_id == "2":
            set1.add(packet.seq_number)
            if start_time_1 == -1:
                start_time_1 = packet.time
        else:
            set2.add(packet.seq_number)
            if start_time_2 == -1:
                start_time_2 = packet.time
    # 'r' receive of an ACK for a tracked seq number counts as delivered
    if packet.pkt_type == "ack" and packet.event == "r":
        if packet.flow_id == "2" and set1.__contains__(packet.seq_number):
            pkts_rcvd1 += 1
            end_time_1 = packet.time
            set1.remove(packet.seq_number)
        elif packet.flow_id == "3" and set2.__contains__(packet.seq_number):
            pkts_rcvd2 += 1
            end_time_2 = packet.time
            set2.remove(packet.seq_number)
# throughput in Mbit/s: packets * 1040 bytes * 8 bits / duration / 2^20
tp1 = (pkts_rcvd1 * 1040 * 8) / (end_time_1 - start_time_1) / 1048576
tp2 = (pkts_rcvd2 * 1040 * 8) / (end_time_2 - start_time_2) / 1048576
print "Throughput1:::", tp1
print "Throughput2:::", tp2
|
987,942 | 6b7f874afc11b420613200c654a1acd976037f2b | from django.shortcuts import render
# Create your views here.
from .models import (
CarModel,
SelectionServices,
RequestUser,
DeleteRequest,
DiscountCode,
Comment
)
from .serializers import (
GetCarModelSerializer,
GetSelectionServicesSerializer,
PostDeleteRequest,
PostDiscountCode,
PostRequestUser,
PostSatisfactionUser,
PostDoIt,
GetRequestUser,
GetRequestUserForUpdate,
GetRequestUsers
)
from rest_framework import generics, status
from rest_framework.response import Response
from django_filters.rest_framework import DjangoFilterBackend
from djoser import permissions
from rest_framework.views import APIView
class GetCarModel(APIView):
    """Return every CarModel as serialized JSON (no filtering/pagination)."""
    def get(self, request):
        queryset = CarModel.objects.all()
        serializer_class = GetCarModelSerializer(
            queryset, many=True, context={'request': request})
        return Response(serializer_class.data, status=status.HTTP_200_OK)
class GetSelectionServices(generics.ListAPIView):
    """List SelectionServices, filterable by car model (?name_car=...)."""
    serializer_class = GetSelectionServicesSerializer
    queryset = SelectionServices.objects.all()
    filter_backends = [DjangoFilterBackend]
    filter_fields = ["name_car"]
class PostDetailUser(generics.ListCreateAPIView):
    """List all service requests (GET) and create new ones (POST)."""
    serializer_class = PostRequestUser
    queryset = RequestUser.objects.all()
class GetSatisfactionUser(generics.ListAPIView):
    """Read-only list of user satisfaction comments."""
    serializer_class = PostSatisfactionUser
    queryset = Comment.objects.all()
class PutSatisfactionUser(generics.ListCreateAPIView):
    # NOTE(review): despite the "Put" name this is a list/create view
    # (GET + POST); no PUT handling happens here - confirm intent.
    queryset = Comment.objects.all()
    serializer_class = PostSatisfactionUser
class GetRequestUser(generics.ListAPIView):
    # NOTE(review): this view class shadows the imported serializer of the
    # same name. `serializer_class` below still resolves to the *imported*
    # GetRequestUser serializer (the class body executes before the class
    # name is bound), but the module-level name is clobbered afterwards -
    # consider renaming the view to avoid confusion.
    queryset = RequestUser.objects.all()
    serializer_class = GetRequestUser
    filter_backends = [DjangoFilterBackend]
    filter_fields = ["doit"]
class GetRequestUserForUser(generics.ListAPIView):
    """List a user's own requests, filterable by author (?author=...)."""
    queryset = RequestUser.objects.all()
    serializer_class = GetRequestUsers
    filter_backends = [DjangoFilterBackend]
    filter_fields = ["author"]
class DeleteRequestUser(APIView):
    """Delete a RequestUser by primary key (DELETE /.../<pk>)."""

    def delete(self, request, pk):
        """Remove the request with primary key *pk*; respond 204 No Content.

        NOTE(review): RequestUser.objects.get raises DoesNotExist for an
        unknown pk (surfacing as a 500) - confirm whether a 404 is wanted.
        """
        # the original also bound an unused serializer_class local; removed
        RequestUser.objects.get(pk=pk).delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class DeleteReasonUser(generics.ListCreateAPIView):
    """List deletion reasons (GET) and record new ones (POST)."""
    queryset = DeleteRequest.objects.all()
    serializer_class = PostDeleteRequest
class CreateCode(generics.ListCreateAPIView):
    """List discount codes (GET) and create new ones (POST)."""
    queryset = DiscountCode.objects.all()
    serializer_class = PostDiscountCode
class PutCode(APIView):
    """Full update (PUT) of an existing DiscountCode by primary key."""

    def put(self, request, pk):
        """Validate and persist changes to the code with pk *pk*.

        Returns 201 with the updated data on success, 400 with the
        validation errors otherwise.
        """
        queryset = DiscountCode.objects.get(pk=pk)
        serializer_class = PostDiscountCode(
            queryset, data=request.data, context={'request': request})
        if serializer_class.is_valid():
            serializer_class.save()
            return Response(serializer_class.data, status=status.HTTP_201_CREATED)
        # bug fix: DRF serializers expose validation failures as `.errors`
        # (plural); the original `.error` raised AttributeError on invalid input
        return Response(serializer_class.errors, status=status.HTTP_400_BAD_REQUEST)
class GetUserCode(generics.ListAPIView):
    """List discount codes, filterable by owning user (?user=...)."""
    queryset = DiscountCode.objects.all()
    serializer_class = PostDiscountCode
    filter_backends = [DjangoFilterBackend]
    filter_fields = ["user"]
class GetUserCodeForAdmin(generics.ListAPIView):
    """Unfiltered list of every discount code (admin view)."""
    queryset = DiscountCode.objects.all()
    serializer_class = PostDiscountCode
class GetDeleteReasonUser(generics.ListAPIView):
    """List deletion reasons, filterable by count (?count=...)."""
    queryset = DeleteRequest.objects.all()
    serializer_class = PostDeleteRequest
    filter_backends = [DjangoFilterBackend]
    filter_fields = ["count"]
class PutRequestUserForAdmin(APIView):
    """Admin PUT endpoint for updating a RequestUser by primary key."""

    def put(self, request, pk):
        """Validate and persist admin changes to request *pk*.

        Returns 201 with the updated data on success, 400 with the
        validation errors otherwise.
        """
        queryset = RequestUser.objects.get(pk=pk)
        serializer_class = GetRequestUserForUpdate(
            queryset, data=request.data, context={'request': request})
        if serializer_class.is_valid():
            serializer_class.save()
            return Response(serializer_class.data, status=status.HTTP_201_CREATED)
        # bug fix: DRF serializers expose validation failures as `.errors`
        # (plural); the original `.error` raised AttributeError on invalid input
        return Response(serializer_class.errors, status=status.HTTP_400_BAD_REQUEST)
|
987,943 | f87c3e8ce7a545fa0f162e4adbdd81a29f0f4d85 | import sys
sys.stdout = open('a_big.out', 'w')
sys.stdin = open("a_big.in", 'r')
sys.setrecursionlimit(1500)
def empty(row):
    """Return True when every cell of *row* is the '?' placeholder
    (an empty row is vacuously empty)."""
    return all(c == '?' for c in row)
def filled_out(row):
    """Return True when *row* has no '?' placeholders left."""
    return '?' not in row
def should_fill(row):
    """True when *row* is partially filled: it has both '?' and non-'?' cells."""
    unknown = sum(1 for c in row if c == '?')
    return 0 < unknown < len(row)
def fill_out(row):
    # Spread each non-'?' letter over the adjacent run of '?' cells:
    # back-fill the gap since the previous letter, then forward-fill while
    # '?' continues. Mutates *row* in place and also returns it.
    first = 0
    for i in range(len(row)):
        #print i
        if row[i] != '?':
            # back-fill [first, i) with the letter found at i
            for j in range(first, i):
                row[j] = row[i]
            first = i
            # forward-fill the following '?' run with the same letter.
            # NOTE: reassigning `i` inside the loop body does not affect
            # the outer `for`, which harmlessly re-scans these cells on
            # later iterations.
            while i + 1 < len(row) and row[i + 1] == '?':
                row[i + 1] = row[first]
                i = i + 1
            first = i + 1
    return row
def algorithm(grid):
    # Phase 1: repeatedly stretch every letter horizontally across the '?'
    # cells of its own row until no partially-filled row remains.
    #print grid
    can_fill = True
    while can_fill:
        can_fill = False
        for row in grid:
            if should_fill(row):
                can_fill = True
                fill_out(row)
    # Phase 2: copy completed rows into fully-empty neighbouring rows
    # (prefer the row above, fall back to the row below).
    # NOTE(review): `grid[i] = grid[i - 1]` assigns the *same list object*
    # to both indices (aliasing) rather than a copy - harmless only while
    # aliased rows are never mutated differently afterwards; confirm.
    some_empty = True
    while some_empty:
        some_empty = False
        for i, row in enumerate(grid):
            if empty(row):
                some_empty = True
                if i > 0:
                    if not empty(grid[i - 1]):
                        grid[i] = grid[i - 1]
                    elif i < len(grid) - 1:
                        grid[i] = grid[i + 1]
                else:
                    grid[i] = grid[i + 1]
    return
def solve():
    # Read one R x C grid ('?' marks unassigned cake cells), fill it so
    # each initial letter owns a rectangular region, and return the grid
    # rendered as newline-joined rows (prefixed with a newline).
    R, C = map(int, raw_input().split())
    grid = []
    for _ in range(R):
        grid.append(list(raw_input().strip()))
        assert len(grid[-1]) == C
    algorithm(grid)
    return '\n' + '\n'.join(map(lambda r: "".join(r), grid))
# Driver: read the number of test cases, solve each, print in the
# "Case #k: <answer>" format expected by the Code Jam judge (Python 2).
T = int(raw_input())
for i in range(1, T + 1):
    ans = solve()
    print "Case #" + str(i) + ": " + str(ans)
987,944 | 6d70d7322e6e462bd2092bc9d8b5b71f0a36cbe7 | import os
from torchvision import transforms
from torch.utils import data
from .image_utils import imageLoader, is_image_file, make_dataset
class ImageDataset(data.Dataset):
    """Paired (full, downscaled) image dataset, e.g. for super-resolution.

    Each item is (img, img_small): the transformed image and a smaller
    copy of it, both converted to tensors; images are loaded in YCbCr.
    """
    def __init__(self, root, transform=None, smalltransform=None,
                 toTensor=None, loader=imageLoader):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise Exception('Fond 0 images in ' + root)
        self.root = root
        self.imgs = imgs
        # all three transforms are mandatory despite the keyword defaults
        if transform == None:
            raise Exception('transform is None')
        if smalltransform == None:
            raise Exception('smalltransform is None')
        if toTensor == None:
            raise Exception('toTensor is None')
        self.transform = transform
        self.smalltransform = smalltransform
        self.toTensor = toTensor
        self.loader = loader
    def __getitem__(self, idx):
        """Return the (full, downscaled) tensor pair for sample *idx*."""
        path = self.imgs[idx]
        img = self.loader(path, mode='YCbCr')
        img = self.transform(img)
        img_small = self.smalltransform(img)
        img = self.toTensor(img)
        img_small = self.toTensor(img_small)
        return img, img_small
    def __len__(self):
        return len(self.imgs)
class testImageDataset(data.Dataset):
    """Evaluation dataset: yields (RGB tensor, base filename) pairs."""
    def __init__(self, root):
        imgs = make_dataset(root)
        if len(imgs) == 0:
            raise Exception('Fond 0 images in ' + root)
        self.root = root
        self.imgs = imgs
        self.transform = transforms.ToTensor()
        self.loader = imageLoader
    def __getitem__(self, idx):
        """Return (tensor, name) where *name* is the extension-less filename."""
        path = self.imgs[idx]
        _, img_name = os.path.split(path)
        img_name, _ = os.path.splitext(img_name)
        img = self.loader(path, mode='RGB')
        img = self.transform(img)
        return img, img_name
    def __len__(self):
        return len(self.imgs)
|
987,945 | e8748d9125c2012d250b1c7d6273f50314a3bfcb | from inverse_kinematics.InverseKinematics import *
# Fit a normalizing-flow prior on walking motion-capture data, then run
# gradient-based inverse kinematics towards a two-foot goal pose and play
# back the result in the viewer.
torch.manual_seed(1510)
sample_rate = 12
selected = get_fnames(["walk"])
data = parse_selected(selected, sample_rate=sample_rate, limit=1000)
X, y = gather_all_np(data)
X = X[:, :(X.shape[1] - 3)]  # drop the last 3 columns (presumably root translation - confirm)
dummy_joints, dummy_pose = dummy()
# excluded = ['lfingers', 'lthumb', 'ltoes', 'rfingers', 'rthumb', 'rtoes']
excluded = ['root', 'lfingers', 'lthumb', 'ltoes', 'rfingers', 'rthumb', 'rtoes', 'rhand', 'lhand', 'rfoot', 'lfoot', 'head', 'rwrist', 'lwrist', 'rclavicle', 'lclavicle']
included, indices = exclude(excluded, return_indices=True, root_exclude=[1])
# train the normalizing-flow prior on the retained joint channels
steps, lr = 1000, 5e-3
nfprior = ('normalizingflows', nf_prior(compute_NF(X, steps=steps, indices=indices, lr=lr)))
# goal_joints = ['rfoot']
# pose = {'rfemur': [40, 0, 0]}
goal_joints = ['rfoot', 'lfoot']
pose = {'rfemur': [25, 0, 0], 'lfemur': [-25, 0, 0]}
goal = set_goal(goal_joints, pose)
# IK optimisation settings (note: lr is rebound here for the IK solve)
saveframes, plot = True, True
n_epochs, lr, weight_decay, lh_var = 500, 1, 0, 1
inv_nf = Inverse_model(nfprior, indices, saveframes=saveframes, plot=plot)
inv_nf.inverse_kinematics(goal, n_epochs=n_epochs, lr=lr, lh_var=lh_var, weight_decay=weight_decay)
# play back the solved frames
v = Viewer(dummy_joints_np(), inv_nf.frames)
v.run()
|
987,946 | 2826450917806698d29e38f27dbcdf0803297f88 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy
import random
from geometry_msgs.msg import Twist
from geometry_msgs.msg import PoseWithCovarianceStamped
from sensor_msgs.msg import Image
from sensor_msgs.msg import Imu
from sensor_msgs.msg import LaserScan
from sensor_msgs.msg import JointState
from nav_msgs.msg import Odometry
from std_msgs.msg import String
from cv_bridge import CvBridge, CvBridgeError
import cv2
import tf
import json
import numpy as np
import time
import actionlib
from move_base_msgs.msg import MoveBaseAction, MoveBaseGoal
from nav_msgs.msg import Odometry
import actionlib_msgs
# camera image 640*480
img_w = 640
img_h = 480
image_resize_scale = 1 # 8
# PI
PI = 3.1415
DEGRAD = 3.141592/180
# robot running coordinate in BASIC MODE
#basic_coordinate = np.array([
# # x, y, th(deg)
# [-1.0 , 0.3 , 30], # 1
# [-1.0 ,-0.3 , 330], # 2
# [-0.6 , 0.0 , 0], # 3
# [-0.5 ,-0.1 , 315], # 4
# [ 0 ,-0.6 , 180], # 5
# [ 0 ,-0.6 , 90], # 6
# [ 0 ,-0.5 , 0], # 7
# [ 0.5 ,-0.1 , 45], # 10
#
# [ 1.0 ,-0.3 , 210], # 1
# [ 1.0 , 0.3 , 150], # 2
# [ 0.6 , 0.0 , 180], # 3
# [ 0.5 , 0.1 , 135], # 4
# [ 0 , 0.6 , 0], # 5
# [ 0 , 0.6 , 270], # 6
# [ 0 , 0.5 , 180], # 7
# [-0.5 , 0.1 , 225]] # 10
#)
target_coordinate = np.array([
# [[ 1.20, 0.0 , 180],
[[ 1.00, 0.3 , 150],
[ 0.55, 0.0 , 180],
[ 1.00,-0.3 , 210],
[ 0.9 ,-0.4 , 235]],
# [[-0.1 , 0.7 , 300],
[[ 0 , 0.6 , 0],
[ 0 , 0.6 , 270],
[ 0 , 0.6 , 180],
[ 0.4 , 0.9 , 325]],
# [[-1.2, -0.0 , 0],
[[-1.00,-0.3 , 330],
[-0.55, 0.0 , 0],
[-1.00, 0.3 , 30],
[-0.9 , 0.4 , 55]],
# [[ 0.1 ,-0.7 , 120],
[[ 0 ,-0.6 , 180],
[ 0 ,-0.6 , 90],
[ 0 ,-0.6 , 0],
[-0.4 ,-0.9 , 145]]
])
# [-0.4, 0.0, 0], # 1
# [-0.9, 0.0, 0], # 2
# [-0.9, 0.4, 0], # 3
# [-0.9, -0.4, 0], # 4
# [-0.9, 0.0, 0], # 5
# [0, -0.5, 0], # 6
# [0, -0.5, PI], # 7
# [0, -0.5, PI/2], # 8
# [0, -1.2, PI/2]] # 17
class RandomBot():
def __init__(self, bot_name="NoName"):
# bot name
self.name = bot_name
# velocity publisher
self.vel_pub = rospy.Publisher('cmd_vel', Twist,queue_size=1)
# navigation publisher
self.client = actionlib.SimpleActionClient('move_base',MoveBaseAction)
self.scan = LaserScan()
self.lidar_sub = rospy.Subscriber('scan', LaserScan, self.lidarCallback)
# odom
topicname_odom = "odom"
self.odom = rospy.Subscriber(topicname_odom, Odometry, self.odomCallback)
# amcl pose
topicname_amcl_pose = "amcl_pose"
self.amcl_pose = rospy.Subscriber(topicname_amcl_pose, PoseWithCovarianceStamped, self.AmclPoseCallback)
# usb camera
self.img = None
self.camera_preview = True
self.bridge = CvBridge()
topicname_image_raw = "image_raw"
self.image_sub = rospy.Subscriber(topicname_image_raw, Image, self.imageCallback)
self.basic_mode_process_step_idx = 0 # process step in basic MODE
self.scan_ave = np.zeros((2,12)) # [0]:latest, [1]:prev
self.scan_diff = np.zeros(12)
self.scan_sum = np.zeros(16)
self.myPosX = 0
self.myPosY = -150
self.myDirect = np.pi / 2
## war status
#topicname_war_state = "war_state"
#self.war_state = rospy.Subscriber(topicname_war_state, String, self.stateCallback)
#self.my_score = 0
#self.enemy_score = 0
    def odomCallback(self, data):
        """Update the robot heading from /odom.

        NOTE(review): euler_from_quaternion returns a (roll, pitch, yaw)
        tuple, but self.myDirect is initialised as a scalar angle in
        __init__ - storing the whole tuple here looks unintended; confirm
        whether e[2] (yaw) was meant.
        """
        # print(data.pose.pose.position.x,data.pose.pose.position.y,data.pose.pose.orientation.z,data.pose.pose.orientation.w)
        e = tf.transformations.euler_from_quaternion((data.pose.pose.orientation.x, data.pose.pose.orientation.y, data.pose.pose.orientation.z, data.pose.pose.orientation.w))
        # print(e[2] / (2 * np.pi) * 360)
        self.myDirect = e # rad
    def AmclPoseCallback(self, data):
        """Cache the AMCL-estimated (x, y) map position of the robot."""
        self.myPosX = data.pose.pose.position.x
        self.myPosY = data.pose.pose.position.y
        # print(self.myPosX, self.myPosY)
# camera image call back sample
# convert image topic to opencv object and show
def imageCallback(self, data):
try:
self.img = self.bridge.imgmsg_to_cv2(data, "bgr8")
except CvBridgeError as e:
print(e)
size = (img_w/image_resize_scale, img_h/image_resize_scale)
frame = cv2.resize(self.img, size)
if self.camera_preview:
#print("image show")
cv2.imshow("Image window", frame)
cv2.waitKey(1)
def stateCallback(self, state):
# print(state.data)
dic = json.loads(state.data)
if self.name == "red_bot": # red_bot
self.my_score = int(dic["scores"]["r"])
self.enemy_score = int(dic["scores"]["b"])
else: # blue_bot
self.my_score = int(dic["scores"]["b"])
self.enemy_score = int(dic["scores"]["r"])
print "Zone0", dic["targets"][ 8]["player"],dic["targets"][14]["player"],dic["targets"][ 6]["player"]
print "Zone1", dic["targets"][ 7]["player"],dic["targets"][16]["player"],dic["targets"][10]["player"]
print "Zone2", dic["targets"][11]["player"],dic["targets"][17]["player"],dic["targets"][13]["player"]
print "Zone3", dic["targets"][12]["player"],dic["targets"][15]["player"],dic["targets"][ 9]["player"]
# Ref: https://hotblackrobotics.github.io/en/blog/2018/01/29/action-client-py/
# Ref: https://github.com/hotic06/burger_war/blob/master/burger_war/scripts/navirun.py
# RESPECT @hotic06
# do following command first.
# $ roslaunch burger_navigation multi_robot_navigation_run.launch
    def setGoal(self,x,y,yaw):
        """Send a move_base goal at map pose (x, y, yaw) and block until done.

        Returns 0 on success, -1 when the action server is unavailable or
        the goal finished in state 2 (actionlib PREEMPTED / cancelled).
        """
        self.client.wait_for_server()
        goal = MoveBaseGoal()
        goal.target_pose.header.frame_id = "map"
        goal.target_pose.header.stamp = rospy.Time.now()
        goal.target_pose.pose.position.x = x
        goal.target_pose.pose.position.y = y
        # Euler to Quartanion
        q=tf.transformations.quaternion_from_euler(0,0,yaw)
        goal.target_pose.pose.orientation.x = q[0]
        goal.target_pose.pose.orientation.y = q[1]
        goal.target_pose.pose.orientation.z = q[2]
        goal.target_pose.pose.orientation.w = q[3]
        self.client.send_goal(goal)
        wait = self.client.wait_for_result()
        if not wait:
            # no result means the action server went away: shut down the node
            rospy.logerr("Action server not available!")
            rospy.signal_shutdown("Action server not available!")
            return -1
        get_state = self.client.get_state()
        print("wait", wait, "get_state", get_state)
        if get_state == 2: # if send_goal is canceled
            return -1
        return 0
    def cancelGoal(self):
        """Cancel the navigation goal currently tracked by move_base."""
        self.client.cancel_goal()
# lidar scan topic call back sample
# update lidar scan state
def lidarCallback(self, data):
self.scan = data
self.scan_ave[1] = self.scan_ave[0] # prev <= latest
self.scan_ave[0,0] = (sum(self.scan.ranges[0:2])+sum(self.scan.ranges[358:359])) * 200 # /5 * 1000
if self.scan_ave[0,0] == float('inf'):
self.scan_ave[0,0] = 100
i = 1
while i < 12:
self.scan_ave[0,i] = sum(self.scan.ranges[i*30-2:i*30+2]) * 200 # /5 * 1000
if self.scan_ave[0,i] == float('inf'):
self.scan_ave[0,i] = 100
i += 1
self.scan_diff = self.scan_ave[0] - self.scan_ave[1]
# RESPECT @koy_tak
# if (self.scan.ranges[0] != 0 and self.scan.ranges[0] < DISTANCE_TO_WALL_THRESHOLD) or (self.scan.ranges[10] != 0 and self.scan.ranges[10] < DISTANCE_TO_WALL_THRESHOLD) or (self.scan.ranges[350] != 0 and self.scan.ranges[350] < DISTANCE_TO_WALL_THRESHOLD):
# self.f_isFrontBumperHit = True
# print("self.f_isFrontBumperHit = True")
# self.cancelGoal()
# else:
# self.f_isFrontBumperHit = False
def calcTwist(self, direction):
if direction == 0:
fr = self.scan_ave[0,0]
f30 = self.scan_ave[0,1]
f60 = self.scan_ave[0,2]
side= self.scan_ave[0,3]
b60 = self.scan_ave[0,4]
b30 = self.scan_ave[0,5]
bo = self.scan_ave[0,7]
sign_x = 1
sign_rot = 1
elif direction == 1:
fr = self.scan_ave[0,6]
f30 = self.scan_ave[0,5]
f60 = self.scan_ave[0,4]
side= self.scan_ave[0,3]
b60 = self.scan_ave[0,2]
b30 = self.scan_ave[0,1]
bo = self.scan_ave[0,11]
sign_x = -1
sign_rot = -1
elif direction == 2:
fr = self.scan_ave[0,0]
f30 = self.scan_ave[0,11]
f60 = self.scan_ave[0,10]
side= self.scan_ave[0,9]
b60 = self.scan_ave[0,8]
b30 = self.scan_ave[0,7]
bo = self.scan_ave[0,5]
sign_x = 1
sign_rot = -1
else:
fr = self.scan_ave[0,6]
f30 = self.scan_ave[0,7]
f60 = self.scan_ave[0,8]
side= self.scan_ave[0,9]
b60 = self.scan_ave[0,10]
b30 = self.scan_ave[0,11]
bo = self.scan_ave[0,1]
sign_x = -1
sign_rot = 1
ratiof = f30 / side
ratiob = b30 / side
print "Lider", '{:.0f}'.format(fr), '{:.0f}'.format(f30), '{:.0f}'.format(f60), '{:.0f}'.format(side), '{:.0f}'.format(b60), '{:.0f}'.format(b30),
print "Dir", '{:.3f}'.format(ratiof), '{:.3f}'.format(ratiob),
ret = 0
if fr < 110:
x = -0.1
th = 0
elif fr < 200 or f30 < 160:
x = 0
#th = 2.0
th = 0
ret = 1
elif b60 < side:
x = 0
th = 0.5
elif f60 < side:
x = 0
th = -0.5
else:
x = 0.22
if ratiof > 3.0 or ratiof < 1.333:
ratiof = 2.0
if ratiob > 3.0 or ratiob < 1.333:
ratiob = 2.0
if side > 180:
if ratiof > 1.76 or ratiob < 2.34:
th = 0.2
else:
th = 0
elif side > 150:
if ratiof > 2.1 or ratiob < 1.9:
th = 0.2
else:
th = 0
elif side > 120:
if ratiof < 1.9 or ratiob > 2.1:
th = -0.2
else:
th = 0
else:
if ratiof < 2.34 or ratiob > 1.76:
th = -0.2
else:
th = 0
if bo > 200 and bo < 300:
x = 0
th = 0
ret = 1
twist = Twist()
twist.linear.x = x * sign_x; twist.linear.y = 0; twist.linear.z = 0
twist.angular.x = 0; twist.angular.y = 0; twist.angular.z = th * sign_rot
print "Twist", '{:.3f}'.format(x), '{:.3f}'.format(th)
#print " myPos", '{:.3f}'.format(self.myPosX), '{:.3f}'.format(self.myPosY), '{:.3f}'.format(self.myDirect)
#print " myPos", self.myPosX, self.myPosY, self.myDirect
self.vel_pub.publish(twist)
#return twist
return ret
def strategy(self):
r = rospy.Rate(3) # change speed 3fps
target_speed = 0
target_turn = 0
control_speed = 0
control_turn = 0
# ---> testrun
#while not rospy.is_shutdown():
# NextGoal_coor = basic_coordinate[ self.basic_mode_process_step_idx ]
# _x = NextGoal_coor[0]
# _y = NextGoal_coor[1]
# _th = NextGoal_coor[2] * DEGRAD
# ret = self.setGoal(_x, _y, _th)
# self.basic_mode_process_step_idx += 1
# if self.basic_mode_process_step_idx >= len(basic_coordinate):
# self.basic_mode_process_step_idx = 0
# ---< testrun
mode = 0
zone = 2
direction = 0
while not rospy.is_shutdown():
print 'mode=',mode,'zone =',zone, "step_idx=", self.basic_mode_process_step_idx
if mode == 0:
NextGoal_coor = target_coordinate[zone, self.basic_mode_process_step_idx ]
_x = NextGoal_coor[0]
_y = NextGoal_coor[1]
_th = NextGoal_coor[2] * DEGRAD
ret = self.setGoal(_x, _y, _th)
self.basic_mode_process_step_idx += 1
#if self.basic_mode_process_step_idx >= 5:
if self.basic_mode_process_step_idx >= 4:
self.basic_mode_process_step_idx = 0
if zone == 0:
zone = 3
else:
zone -= 1
mode = 1
elif mode == 1:
#print 'direction =', direction
ret = self.calcTwist(direction)
if ret == 1:
#if direction == 3:
# direction = 0
#else:
# direction += 1
mode = 0
#print(twist)
#self.vel_pub.publish(twist)
r.sleep()
if __name__ == '__main__':
rospy.init_node('random_run')
bot = RandomBot('Random')
bot.strategy()
|
987,947 | 4d220f728fee371971674da28041baec5081d397 | from visual.controls import *
def change(): # Called by controls when button is clicked
    """Toggle the button label between 'Click me' and 'Try again'."""
    if b.text == 'Click me':
        b.text = 'Try again'
    else:
        b.text = 'Click me'
c = controls() # Create controls window
# Create a button in the controls window:
b = button( pos=(0,0), width=60, height=60,
            text='Click me', action=lambda: change() )
# Event loop: poll the controls window forever so clicks invoke change()
while 1:
    c.interact() # Check for mouse events and drive specified actions
|
987,948 | 9dfd3c0cc13fa0357338a94cc9c1dd67b4a57778 | #!/usr/bin/env python3.7
from user import User,Credentials
def create_user(uname,password):
    """
    Function to create a new user (returns the unsaved User object)
    """
    new_user = User(uname,password)
    return new_user
def save_users(user):
    """
    Function to persist *user* via its own save_user() method
    """
    user.save_user()
def find_user(user_name):
    """
    Function that finds a user by username and returns the user
    """
    return User.find_by_user_name(user_name)
def check_existing_users(user_name,password):
    """
    Function that check if a user exists with that username and return a boolean
    """
    # NOTE(review): despite the name/docstring, this only constructs a new
    # User object - it never queries stored users and no boolean is
    # returned; confirm the intended behaviour.
    new_user = User(user_name,password)
    return new_user
def create_credentials(site_name, site_username, site_password):
    '''
    Function to create a new (unsaved) credential account
    '''
    new_credentials = Credentials(site_name, site_username, site_password)
    return new_credentials
def save_credentials(credential):
    '''
    Function to persist *credential* via its own save_credential() method
    '''
    credential.save_credential()
def display_credentials():
    '''
    Function that returns all the saved credentials
    '''
    return Credentials.display_credentials()
def find_credentials(site_name):
    '''
    Function that finds a credential account by site name and returns it.
    '''
    return Credentials.find_by_site_name(site_name)
def check_existing_credentials(site_name):
    '''
    Function that checks if a credential account exists for *site_name*
    and returns a Boolean
    '''
    return Credentials.credential_exist(site_name)
def del_contact(credential):
    '''
    Function to delete a credential account
    '''
    credential.delete_credential()
def log_in(user_name, password):
    """Authenticate *user_name*; return the logged-in user on success,
    None on failure.

    Bug fix: the original used `==` (a no-op comparison) instead of
    assignment, so the guard compared against the function object itself
    and User.log_in was called twice.
    """
    result = User.log_in(user_name, password)
    if result != False:
        return result
def main():
    """Interactive console UI for the password locker.

    Bug fixes over the original:
    - the duplicate `if short_code == 'li'` block made the real login
      `elif` branch unreachable; the two are merged into one branch.
    - the inner credentials loop never re-read `short_code`, so it spun
      forever on the first command; it now prompts every iteration.
    """
    print("Hello Welcome to Password Locker. What is your name?")
    user_name = input()
    print(f"Hello {user_name}.")
    print('\n')
    while True:
        print('\n')
        print("Use these short codes : cc - create a new user, li -to login ")
        short_code = input().lower()
        if short_code == 'cc':
            print("New User")
            print("-" * 10)
            print("User name ....")
            user_name = input()
            print("Password ...")
            password = input()
            # create and save new user.
            save_users(create_user(user_name, password))
            print('\n')
            print(f"New User {user_name} created")
            print('\n')
        elif short_code == 'li':
            # Users login to their accounts
            print('\n')
            print("Login to your account")
            print("Enter your username")
            user_name = input()
            print("Enter the password")
            password = input()
            if log_in(user_name, password) == None:
                print('\n')
                print("PLease try again or create password")
                print('\n')
                continue
            print('\n')
            print(f"{user_name} WELCOME TO YOUR CREDENTIALS\n Use these short codes")
            while True:
                print("Short codes: ca:Credential Account,dc:Display Credential accounts")
                short_code = input().lower()  # bug fix: prompt every pass
                if short_code == 'ca':
                    print("New Credential Account")
                    print("-" * 10)
                    print("Site name ....")
                    site_name = input()
                    print("Site user name ....")
                    site_username = input()
                    print("Site Password ...")
                    site_password = input()
                    # create and save new credential account.
                    save_credentials(create_credentials(site_name, site_username, site_password))
                    print('\n')
                    print(f"New Credential {site_name} {site_username} {site_password} created")
                    print('\n')
                elif short_code == 'dc':
                    if display_credentials():
                        print("Here is a list of all your crenditial accounts")
                        print('\n')
                        for credential in display_credentials():
                            print(f"{credential.site_name} {credential.site_username} .....{credential.site_password}")
                            print('\n')
                    else:
                        print('\n')
                        print("You dont seem to have any credentials saved yet")
                        print('\n')
                elif short_code == 'd':
                    print("Enter the credential account you want to delete")
                    search_site_name = input()
                    if check_existing_credentials(search_site_name):
                        found = find_credentials(search_site_name)
                        print(f"{found.site_name} ")
                        print('-' * 20)
                        Credentials.credentials_list.remove(found)
                    else:
                        print("That credential account does not exist")
                elif short_code == "ex":
                    print("Bye .......")
                    break
                else:
                    print("I really didn't get that. Please use the short codes")
        elif short_code == "ex":
            print("Bye .......")
            break
        else:
            print("I really didn't get that. Please use the short codes")
if __name__ == '__main__':
main()
|
987,949 | f7b0d4196f6049279fb3f754233bb260c914d7bd | int i
print("enter the number")
for(i=1;i<=n;i++)
{
printf("Hello World ");
}
|
987,950 | 91a1b5626db189cad227edecfe1fdec03547afd9 |
class LoginPageData:
    """Static test data for the PSMI login-page tests.

    Holds a valid email/password pair, a deliberately wrong password for
    negative tests, and the expected landing-page title after login.
    """
    # NOTE(review): credentials are hardcoded in source — acceptable for a
    # test fixture, but confirm these are not real production secrets.
    email_id = 'sbabu@psmi.com'
    password = 'Password1!'
    invalid_passwd = 'Password1!1'  # wrong password for negative login tests
    psmi_landing_page_title = 'Registration Requests'
987,951 | f979b6b1473ac7be56ff7cb448a495a40ed6f053 | from tkinter import *
class V_SearchReader:
    """Top-level Tk window for the SearchReader UI: fixed 400x200, not resizable."""

    def __init__(self):
        # Build the root window, then configure title, size and resize policy.
        root = Tk()
        root.title('SearchReader')
        root.geometry('400x200')
        root.resizable(0, 0)
        self.__root = root
|
987,952 | 61aff5e001d7d0212c7d01d8b6b5b86eb65f2d22 | ############################################################
# -*- coding: utf-8 -*-
#
# # # # # # #
# ## ## # ## # #
# # # # # # # # # # #
# # ## # ## ## ######
# # # # # # #
#
# Python-based Tool for interaction with the 10micron mounts
# GUI with PyQT5 for python
#
# written in python3, (c) 2019-2023 by mworion
# Licence APL2.0
#
###########################################################
# standard libraries
import pytest
from unittest import mock
import os
# external packages
import skyfield.timelib
from PyQt5.QtCore import QObject
from PyQt5.QtCore import QThreadPool, QRect
from PyQt5.QtCore import pyqtSignal, QModelIndex
from PyQt5.QtWidgets import QTableWidgetItem
from skyfield.api import EarthSatellite, Angle, wgs84
from skyfield.units import Distance, Velocity, AngleRate, Rate
from sgp4.exporter import export_tle
import numpy as np
# local import
from tests.unit_tests.unitTestAddOns.baseTestApp import App
from gui.utilities.toolsQtWidget import MWidget
from gui.widgets.main_ui import Ui_MainWindow
from gui.mainWmixin.tabSat_Search import SatSearch
from gui.mainWmixin.tabSat_Track import SatTrack
from logic.databaseProcessing.dataWriter import DataWriter
@pytest.fixture(autouse=True, scope='module')
def function(qapp):
    """Module-scoped fixture: yields a window mixing SatSearch and SatTrack
    over MWidget, wired to the unit-test App stub, and tears it down after
    the module's tests finish."""
    class Mixin(MWidget, SatSearch, SatTrack):
        def __init__(self):
            super().__init__()
            self.app = App()
            self.msg = self.app.msg
            self.databaseProcessing = DataWriter(self.app)
            self.threadPool = QThreadPool()
            self.ui = Ui_MainWindow()
            self.ui.setupUi(self)
            # Explicit re-init of the mixin classes after the UI exists —
            # presumably their __init__ reads self.ui; confirm against the
            # mixins' constructors.
            SatSearch.__init__(self)
            SatTrack.__init__(self)
    window = Mixin()
    yield window
    # Teardown: signal shutdown and let queued workers drain (1 s cap).
    window.closing = True
    window.threadPool.waitForDone(1000)
def test_sources(function):
    # Guards the count of configured satellite TLE source URLs (currently 14);
    # fails if a source is added or removed without updating tests.
    assert len(function.satelliteSourceURLs) == 14
def test_initConfig_1(function):
class Test:
installPath = ''
temp = function.app.automation
function.app.automation = Test()
suc = function.initConfig()
assert suc
assert function.installPath == 'tests/workDir/data'
function.app.automation = temp
def test_initConfig_2(function):
temp = function.app.automation
function.app.automation = None
suc = function.initConfig()
assert suc
assert function.installPath == 'tests/workDir/data'
function.app.automation = temp
def test_initConfig_3(function):
temp = function.app.automation.installPath
function.app.automation.installPath = 'test'
suc = function.initConfig()
assert suc
assert function.installPath == 'test'
function.app.automation.installPath = temp
def test_storeConfig_1(function):
suc = function.storeConfig()
assert suc
def test_enableGuiFunctions_1(function):
with mock.patch.object(function.app.mount.firmware,
'checkNewer',
return_value=None):
suc = function.enableGuiFunctions()
assert not suc
def test_enableGuiFunctions_2(function):
with mock.patch.object(function.app.mount.firmware,
'checkNewer',
return_value=True):
suc = function.enableGuiFunctions()
assert suc
def test_chooseSatellite_1(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satellites = {'NOAA 8': sat}
satTab = function.ui.listSatelliteNames
function.ui.switchToTrackingTab.setChecked(True)
function.app.deviceStat['mount'] = True
with mock.patch.object(satTab,
'item'):
with mock.patch.object(function,
'extractSatelliteData'):
with mock.patch.object(function,
'showSatPasses'):
suc = function.chooseSatellite()
assert suc
def test_chooseSatellite_2(function):
satTab = function.ui.listSatelliteNames
function.ui.switchToTrackingTab.setChecked(False)
function.app.deviceStat['mount'] = False
with mock.patch.object(satTab,
'item'):
with mock.patch.object(function,
'extractSatelliteData'):
with mock.patch.object(function,
'showSatPasses'):
suc = function.chooseSatellite()
assert suc
def test_getSatelliteDataFromDatabase_1(function):
class Name:
name = ''
jdStart = 1
jdEnd = 1
flip = False
message = ''
altitude = None
azimuth = None
function.app.mount.satellite.tleParams = Name()
suc = function.getSatelliteDataFromDatabase()
assert not suc
def test_findSunlit(function):
class SAT:
class FRAME:
def __init__(self, x):
pass
@staticmethod
def is_sunlit(x):
return True
at = FRAME
sat = SAT()
eph = None
tEv = None
val = function.findSunlit(sat, eph, tEv)
assert val
def test_findSatUp_1(function):
class SAT:
@staticmethod
def find_events(x, y, z, altitude_degrees):
return [], []
sat = SAT()
val = function.findSatUp(sat, 0, 0, 0, alt=0)
assert not val[0]
assert not len(val[1])
def test_findSatUp_2(function):
class SAT:
@staticmethod
def find_events(x, y, z, altitude_degrees):
return np.array([5, 7, 7]), np.array([1, 0, 0])
sat = SAT()
val = function.findSatUp(sat, 0, 0, 0, alt=0)
assert val[0]
assert val[1] == [5]
def test_checkTwilight_1(function):
ephemeris = function.app.ephemeris
loc = wgs84.latlon(latitude_degrees=49, longitude_degrees=-11)
tEv = function.app.mount.obsSite.ts.tt_jd(2459215.5)
val = function.checkTwilight(ephemeris, loc, [False, tEv])
assert val == 4
def test_checkTwilight_2(function):
ephemeris = function.app.ephemeris
loc = wgs84.latlon(latitude_degrees=49, longitude_degrees=-11)
tEv = function.app.mount.obsSite.ts.tt_jd(2459215.5)
val = function.checkTwilight(ephemeris, loc, [True, [tEv]])
assert val == 0
def test_findRangeRate(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
loc = wgs84.latlon(latitude_degrees=49, longitude_degrees=-11)
tEv = function.app.mount.obsSite.ts.tt_jd(2459215.5)
val = function.findRangeRate(sat, loc, tEv)
assert round(val[0], 3) == 5694.271
assert round(val[1], 3) == -0.678
assert round(val[2], 3) == 0.004
assert round(val[3], 3) == 0.079
def test_calcSatSunPhase_1(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
loc = wgs84.latlon(latitude_degrees=49, longitude_degrees=-11)
ephemeris = function.app.ephemeris
tEv = function.app.mount.obsSite.ts.tt_jd(2459215.5)
val = function.calcSatSunPhase(sat, loc, ephemeris, tEv)
assert round(val.degrees, 3) == 129.843
def test_calcAppMag_1(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
loc = wgs84.latlon(latitude_degrees=49, longitude_degrees=-11)
ephemeris = function.app.ephemeris
satRange = 483
phase = Angle(degrees=113)
tEv = function.app.mount.obsSite.ts.now()
with mock.patch.object(function,
'calcSatSunPhase',
return_value=phase):
val = function.calcAppMag(sat, loc, ephemeris, satRange, tEv)
assert round(val, 4) == -2.0456
def test_setSatTableEntry(function):
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('test')
suc = function.setSatTableEntry(0, 0, entry)
assert suc
def test_updateTableEntries_1(function):
param = [1, 2, 3, 4]
suc = function.updateTableEntries(0, param)
assert suc
def test_updateTableEntries_2(function):
param = [1, 2, 3, 4]
ts = function.app.mount.obsSite.ts.now()
isUp = (True, [ts])
suc = function.updateTableEntries(0, param, isUp)
assert suc
def test_updateTableEntries_3(function):
param = [1, 2, 3, 4]
ts = function.app.mount.obsSite.ts.now()
isUp = (False, [ts])
suc = function.updateTableEntries(0, param, isUp)
assert suc
def test_updateTableEntries_4(function):
param = [1, 2, 3, 4]
ts = function.app.mount.obsSite.ts.now()
isUp = (False, [ts])
suc = function.updateTableEntries(0, param, isUp, True, 5)
assert suc
def test_updateTableEntries_5(function):
param = [1, 2, 3, 4]
ts = function.app.mount.obsSite.ts.now()
isUp = (False, [ts])
suc = function.updateTableEntries(0, param, isUp, False, 5, 4)
assert suc
def test_satCalcDynamicTable_1(function):
function.satTableDynamicValid = False
suc = function.satCalcDynamicTable()
assert not suc
def test_satCalcDynamicTable_2(function):
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(1)
function.ui.mainTabWidget.setCurrentIndex(1)
suc = function.satCalcDynamicTable()
assert not suc
def test_satCalcDynamicTable_3(function):
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(0)
function.ui.mainTabWidget.setCurrentIndex(1)
suc = function.satCalcDynamicTable()
assert not suc
def test_satCalcDynamicTable_4(function):
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(0)
function.ui.mainTabWidget.setCurrentIndex(6)
suc = function.satCalcDynamicTable()
assert suc
def test_satCalcDynamicTable_5(function):
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(0)
function.ui.mainTabWidget.setCurrentIndex(6)
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('test')
function.ui.listSatelliteNames.setItem(0, 0, entry)
with mock.patch.object(QRect,
'intersects',
return_value=False):
with mock.patch.object(function,
'calcAppMag',
return_value=10):
with mock.patch.object(function,
'findSunlit',
return_value=True):
suc = function.satCalcDynamicTable()
assert suc
def test_satCalcDynamicTable_6(function):
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(0)
function.ui.mainTabWidget.setCurrentIndex(6)
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('test')
function.ui.listSatelliteNames.setItem(0, 0, entry)
function.ui.listSatelliteNames.setRowHidden(0, True)
with mock.patch.object(function,
'findSunlit',
return_value=True):
with mock.patch.object(function,
'calcAppMag',
return_value=10):
with mock.patch.object(QRect,
'intersects',
return_value=True):
suc = function.satCalcDynamicTable()
assert suc
def test_satCalcDynamicTable_7(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(0)
function.ui.mainTabWidget.setCurrentIndex(6)
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(2)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('NOAA 8')
function.ui.listSatelliteNames.setItem(0, 1, entry)
function.ui.listSatelliteNames.setRowHidden(0, False)
function.satellites = {'NOAA 8': sat}
with mock.patch.object(function,
'updateTableEntries'):
with mock.patch.object(function,
'findRangeRate',
return_value=[1, 2, 3]):
with mock.patch.object(function,
'findSunlit',
return_value=False):
with mock.patch.object(QRect,
'intersects',
return_value=True):
suc = function.satCalcDynamicTable()
assert suc
def test_satCalcDynamicTable_8(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(0)
function.ui.mainTabWidget.setCurrentIndex(6)
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(2)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('NOAA 8')
function.ui.listSatelliteNames.setItem(0, 1, entry)
function.ui.listSatelliteNames.setRowHidden(0, False)
function.satellites = {'NOAA 8': sat}
with mock.patch.object(function,
'updateTableEntries'):
with mock.patch.object(function,
'findRangeRate',
return_value=[1, 2, 3]):
with mock.patch.object(function,
'findSunlit',
return_value=True):
with mock.patch.object(function,
'calcAppMag',
return_value=10):
with mock.patch.object(QRect,
'intersects',
return_value=True):
suc = function.satCalcDynamicTable()
assert suc
def test_satCalcDynamicTable_9(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satTableDynamicValid = True
function.ui.satTabWidget.setCurrentIndex(0)
function.ui.mainTabWidget.setCurrentIndex(6)
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(2)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('NOAA 8')
function.ui.listSatelliteNames.setItem(0, 1, entry)
function.ui.listSatelliteNames.setRowHidden(0, False)
function.satellites = {'NOAA 8': sat}
with mock.patch.object(function,
'updateTableEntries'):
with mock.patch.object(function,
'findRangeRate',
return_value=[np.nan, 2, 3]):
with mock.patch.object(function,
'findSunlit',
return_value=True):
with mock.patch.object(function,
'calcAppMag',
return_value=10):
with mock.patch.object(QRect,
'intersects',
return_value=True):
suc = function.satCalcDynamicTable()
assert suc
def test_positionCursorInSatTable_1(function):
satTab = function.ui.listSatelliteNames
satTab.setRowCount(0)
satTab.setColumnCount(2)
satTab.insertRow(0)
entry = QTableWidgetItem('NOAA 8')
satTab.setItem(0, 1, entry)
suc = function.positionCursorInSatTable(satTab, 'test')
assert not suc
def test_positionCursorInSatTable_2(function):
satTab = function.ui.listSatelliteNames
satTab.setRowCount(0)
satTab.setColumnCount(2)
satTab.insertRow(0)
entry = QTableWidgetItem('NOAA 8')
satTab.setItem(0, 1, entry)
suc = function.positionCursorInSatTable(satTab, 'NOAA 8')
assert suc
def test_filterSatelliteNamesList_1(function):
function.ui.satFilterGroup.setEnabled(True)
function.ui.satIsUp.setEnabled(True)
function.ui.satIsUp.setChecked(True)
function.ui.satIsSunlit.setEnabled(True)
function.ui.satIsSunlit.setChecked(True)
function.ui.satRemoveSO.setChecked(True)
function.ui.listSatelliteNames.clear()
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(9)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('1234')
function.ui.listSatelliteNames.setItem(0, 0, entry)
entry = QTableWidgetItem('NOAA 8')
function.ui.listSatelliteNames.setItem(0, 1, entry)
entry = QTableWidgetItem('1')
function.ui.listSatelliteNames.setItem(0, 8, entry)
entry = QTableWidgetItem('1234')
function.ui.listSatelliteNames.setItem(0, 7, entry)
with mock.patch.object(function.ui.satTwilight,
'currentIndex',
return_value=1):
suc = function.filterSatelliteNamesList()
assert suc
def test_checkSatOk_1(function):
tle = ["STARLINK-1914",
"1 47180U 20088BL 21303.19708368 .16584525 12000-4 30219-2 0 9999",
"2 47180 53.0402 223.8709 0008872 210.0671 150.2394 16.31518727 52528"]
ts = function.app.mount.obsSite.ts
tEnd = ts.tt_jd(2459523.2430)
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
suc = function.checkSatOk(sat, tEnd)
assert not suc
def test_checkSatOk_2(function):
tle = ["CALSPHERE 1",
"1 00900U 64063C 21307.74429300 .00000461 00000-0 48370-3 0 9996",
"2 00900 90.1716 36.8626 0025754 343.8320 164.5583 13.73613883839670"]
ts = function.app.mount.obsSite.ts
tEnd = ts.tt_jd(2459523.2430)
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
suc = function.checkSatOk(sat, tEnd)
assert suc
def test_workerSatCalcTable_1(function):
function.ui.listSatelliteNames.setRowCount(0)
suc = function.workerSatCalcTable()
assert suc
assert function.satTableDynamicValid
def test_workerSatCalcTable_2(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satellites = {'sat1': sat}
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(9)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('sat1')
function.ui.listSatelliteNames.setItem(0, 1, entry)
function.satTableBaseValid = False
function.satTableDynamicValid = False
function.ui.satUpTimeWindow.setValue(0)
with mock.patch.object(function,
'findRangeRate'):
with mock.patch.object(function,
'findSunlit',
return_value=False):
with mock.patch.object(function,
'findSatUp'):
with mock.patch.object(function,
'updateTableEntries'):
suc = function.workerSatCalcTable()
assert not suc
def test_workerSatCalcTable_3a(function):
tle = ["STARLINK-1914",
"1 47180U 20088BL 21303.19708368 .16584525 12000-4 30219-2 0 9999",
"2 47180 53.0402 223.8709 0008872 210.0671 150.2394 16.31518727 52528"]
function.satellites = {'sat1': EarthSatellite(tle[1], tle[2], name=tle[0])}
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(9)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('sat1')
function.ui.listSatelliteNames.setItem(0, 1, entry)
function.satTableBaseValid = True
function.satTableDynamicValid = False
function.ui.satUpTimeWindow.setValue(2)
with mock.patch.object(function,
'checkSatOk',
return_value=False):
suc = function.workerSatCalcTable()
assert suc
assert function.satTableDynamicValid
def test_workerSatCalcTable_3b(function):
    # Valid satellite that is NOT sunlit: the dynamic table must still be
    # rebuilt and marked valid.
    # Fix: the original nested two identical patches of 'findSatUp'
    # (copy-paste duplication); one patch is sufficient.
    tle = ["NOAA 8",
           "1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
           "2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
    sat = EarthSatellite(tle[1], tle[2], name=tle[0])
    function.satellites = {'sat1': sat}
    function.ui.listSatelliteNames.setRowCount(0)
    function.ui.listSatelliteNames.setColumnCount(9)
    function.ui.listSatelliteNames.insertRow(0)
    entry = QTableWidgetItem('sat1')
    function.ui.listSatelliteNames.setItem(0, 1, entry)
    function.satTableBaseValid = True
    function.satTableDynamicValid = False
    function.ui.satUpTimeWindow.setValue(2)
    with mock.patch.object(function,
                           'checkSatOk',
                           return_value=True):
        with mock.patch.object(function,
                               'findRangeRate',
                               return_value=(0, 0, 0, 0)):
            with mock.patch.object(function,
                                   'findSunlit',
                                   return_value=False):
                with mock.patch.object(function,
                                       'findSatUp'):
                    with mock.patch.object(function,
                                           'checkTwilight'):
                        with mock.patch.object(function,
                                               'calcAppMag',
                                               return_value=0):
                            with mock.patch.object(function,
                                                   'updateTableEntries'):
                                suc = function.workerSatCalcTable()
                                assert suc
                                assert function.satTableDynamicValid
def test_workerSatCalcTable_4(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satellites = {'sat1': sat}
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(9)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('sat1')
function.ui.listSatelliteNames.setItem(0, 1, entry)
function.satTableBaseValid = True
function.satTableDynamicValid = False
function.ui.satUpTimeWindow.setValue(2)
with mock.patch.object(function,
'checkSatOk',
return_value=True):
with mock.patch.object(function,
'findRangeRate'):
with mock.patch.object(function,
'findSunlit',
return_value=True):
with mock.patch.object(function,
'findSatUp'):
with mock.patch.object(function,
'checkTwilight'):
with mock.patch.object(function,
'updateTableEntries'):
with mock.patch.object(function,
'calcAppMag',
return_value=0):
suc = function.workerSatCalcTable()
assert suc
assert function.satTableDynamicValid
def test_workerSatCalcTable_5(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satellites = {'sat1': sat}
function.ui.listSatelliteNames.setRowCount(0)
function.ui.listSatelliteNames.setColumnCount(9)
function.ui.listSatelliteNames.insertRow(0)
entry = QTableWidgetItem('sat1')
function.ui.listSatelliteNames.setItem(0, 1, entry)
function.satTableBaseValid = True
function.satTableDynamicValid = False
function.ui.satUpTimeWindow.setValue(2)
with mock.patch.object(function,
'checkSatOk',
return_value=True):
with mock.patch.object(function,
'findRangeRate',
return_value=[np.nan]):
with mock.patch.object(function,
'findSunlit',
return_value=True):
with mock.patch.object(function,
'findSatUp'):
with mock.patch.object(function,
'updateTableEntries'):
with mock.patch.object(function,
'calcAppMag',
return_value=0):
suc = function.workerSatCalcTable()
assert suc
assert function.satTableDynamicValid
def test_satCalcTable_1(function):
function.satTableBaseValid = False
suc = function.satCalcTable()
assert not suc
def test_satCalcTable_2(function):
function.satTableBaseValid = True
function.satTableDynamicValid = True
with mock.patch.object(function.threadPool,
'start'):
suc = function.satCalcTable()
assert suc
assert not function.satTableDynamicValid
def test_updateSatTable_1(function):
function.ui.satCyclicUpdates.setChecked(False)
suc = function.updateSatTable()
assert not suc
def test_updateSatTable_2(function):
function.ui.satCyclicUpdates.setChecked(True)
with mock.patch.object(function,
'satCalcTable'):
suc = function.updateSatTable()
assert suc
def test_prepareSatTable_1(function):
suc = function.prepareSatTable()
assert suc
def test_setupSatelliteNameList_1(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satSourceValid = False
function.satellites = {'sat1': sat}
with mock.patch.object(function,
'prepareSatTable'):
suc = function.setupSatelliteNameList()
assert not suc
def test_setupSatelliteNameList_2(function):
tle = ["NOAA 8",
"1 13923U 83022A 20076.90417581 .00000005 00000-0 19448-4 0 9998",
"2 13923 98.6122 63.2579 0016304 96.9736 263.3301 14.28696485924954"]
sat = EarthSatellite(tle[1], tle[2], name=tle[0])
function.satSourceValid = True
function.satellites = {'sat1': sat}
with mock.patch.object(function,
'prepareSatTable'):
with mock.patch.object(function,
'filterSatelliteNamesList'):
with mock.patch.object(function,
'satCalcTable'):
suc = function.setupSatelliteNameList()
assert suc
assert function.satTableBaseValid
def test_workerLoadDataFromSourceURLs_1(function):
with mock.patch.object(function.app.mount.obsSite.loader,
'tle_file',
return_value={}):
suc = function.workerLoadDataFromSourceURLs()
assert not suc
def test_workerLoadDataFromSourceURLs_2(function):
source = 'test'
with mock.patch.object(function.app.mount.obsSite.loader,
'tle_file',
return_value={}):
with mock.patch.object(os.path,
'isfile',
return_value=False):
suc = function.workerLoadDataFromSourceURLs(source=source,
isOnline=False)
assert not suc
def test_workerLoadDataFromSourceURLs_3(function):
source = 'test'
function.satSourceValid = False
with mock.patch.object(function.app.mount.obsSite.loader,
'tle_file',
return_value={}):
with mock.patch.object(os.path,
'isfile',
return_value=True):
with mock.patch.object(function.app.mount.obsSite.loader,
'days_old',
return_value=5):
suc = function.workerLoadDataFromSourceURLs(source=source,
isOnline=True)
assert suc
assert function.satSourceValid
def test_loadDataFromSourceURLs_1(function):
function.ui.satelliteSource.clear()
suc = function.loadDataFromSourceURLs()
assert not suc
def test_loadDataFromSourceURLs_2(function):
    # NOTE(review): this test is byte-identical to test_loadDataFromSourceURLs_1.
    # It was presumably meant to cover a different branch (e.g. a populated
    # source selector); confirm the intended scenario and differentiate it.
    function.ui.satelliteSource.clear()
    suc = function.loadDataFromSourceURLs()
    assert not suc
def test_loadDataFromSourceURLs_3(function):
function.ui.satelliteSource.addItem('Active')
function.ui.satelliteSource.setCurrentIndex(0)
suc = function.loadDataFromSourceURLs()
assert suc
def test_progSatellites_1(function):
raw = 'test'
with mock.patch.object(function.databaseProcessing,
'writeSatelliteTLE',
return_value=False):
suc = function.progSatellites(raw)
assert not suc
def test_progSatellites_2(function):
raw = 'test'
with mock.patch.object(function.databaseProcessing,
'writeSatelliteTLE',
return_value=True):
with mock.patch.object(function.app.automation,
'uploadTLEData',
return_value=False):
suc = function.progSatellites(raw)
assert not suc
def test_progSatellites_3(function):
raw = 'test'
with mock.patch.object(function.databaseProcessing,
'writeSatelliteTLE',
return_value=True):
with mock.patch.object(function.app.automation,
'uploadTLEData',
return_value=True):
suc = function.progSatellites(raw)
assert suc
def test_satelliteFilter_1(function):
class SatNum:
satnum = 1
class Model:
model = SatNum()
raw = {'test': Model(), '0815': Model(), 0: Model()}
function.ui.filterSatellite.setText('test')
val = function.satelliteFilter(raw)
assert 'test' in val
def test_satelliteGUI_1(function):
with mock.patch.object(function,
'checkUpdaterOK',
return_value=False):
suc = function.satelliteGUI()
assert not suc
def test_satelliteGUI_2(function):
with mock.patch.object(function,
'checkUpdaterOK',
return_value=True):
with mock.patch.object(function,
'messageDialog',
return_value=False):
suc = function.satelliteGUI()
assert not suc
def test_satelliteGUI_3(function):
function.ui.minorPlanetSource.clear()
function.ui.minorPlanetSource.addItem('Comet')
function.ui.minorPlanetSource.setCurrentIndex(0)
with mock.patch.object(function,
'checkUpdaterOK',
return_value=True):
with mock.patch.object(function,
'messageDialog',
return_value=True):
suc = function.satelliteGUI()
assert suc
def test_progSatellitesFiltered_1(function):
with mock.patch.object(function,
'satelliteGUI',
return_value=False):
suc = function.progSatellitesFiltered()
assert not suc
def test_progSatellitesFiltered_2(function):
with mock.patch.object(function,
'satelliteGUI',
return_value=True):
with mock.patch.object(function,
'progSatellites'):
with mock.patch.object(function,
'satelliteFilter'):
suc = function.progSatellitesFiltered()
assert suc
def test_progSatellitesFull_1(function):
with mock.patch.object(function,
'satelliteGUI',
return_value=False):
suc = function.progSatellitesFull()
assert not suc
def test_progSatellitesFull_2(function):
with mock.patch.object(function,
'satelliteGUI',
return_value=True):
with mock.patch.object(function,
'progSatellites'):
suc = function.progSatellitesFull()
assert suc
|
987,953 | f96d2da2661f787c4db845e4899b94c63058e829 | # Generated by Django 1.11.21 on 2019-07-08 11:57
import django.db.models.deletion
from django.db import migrations, models
import waldur_core.core.fields
import waldur_core.core.models
import waldur_core.core.validators
class Migration(migrations.Migration):
    """Auto-generated migration: adds Datastore and CustomerDatastoreNew
    models and links virtual machines to a datastore.

    Generated code — do not hand-edit the operations; create a follow-up
    migration for schema changes instead.
    """
    dependencies = [
        ('structure', '0009_project_is_removed'),
        ('waldur_opennebula', '0010_virtualmachine_networks'),
    ]
    operations = [
        # Join table between a structure.Customer and a Datastore.
        migrations.CreateModel(
            name='CustomerDatastoreNew',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                (
                    'customer',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        to='structure.Customer',
                    ),
                ),
            ],
        ),
        # Backend-synced datastore record (capacity figures are in MB).
        migrations.CreateModel(
            name='Datastore',
            fields=[
                (
                    'id',
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name='ID',
                    ),
                ),
                (
                    'name',
                    models.CharField(
                        max_length=150,
                        validators=[waldur_core.core.validators.validate_name],
                        verbose_name='name',
                    ),
                ),
                ('uuid', waldur_core.core.fields.UUIDField()),
                ('backend_id', models.CharField(db_index=True, max_length=255)),
                ('type', models.CharField(max_length=255)),
                (
                    'capacity',
                    models.PositiveIntegerField(
                        blank=True, help_text='Capacity, in MB.', null=True
                    ),
                ),
                (
                    'free_space',
                    models.PositiveIntegerField(
                        blank=True, help_text='Available space, in MB.', null=True
                    ),
                ),
                (
                    'settings',
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name='+',
                        to='structure.ServiceSettings',
                    ),
                ),
            ],
            options={'abstract': False,},
            bases=(waldur_core.core.models.BackendModelMixin, models.Model),
        ),
        # model_name casing is inconsistent with 'virtualmachine' below;
        # Django normalizes model names case-insensitively, so this is benign.
        migrations.AddField(
            model_name='CustomerDatastoreNew',
            name='datastore',
            field=models.ForeignKey(
                on_delete=django.db.models.deletion.CASCADE,
                to='waldur_opennebula.Datastore',
            ),
        ),
        # SET_NULL so deleting a datastore does not cascade-delete VMs.
        migrations.AddField(
            model_name='virtualmachine',
            name='datastore',
            field=models.ForeignKey(
                null=True,
                on_delete=django.db.models.deletion.SET_NULL,
                to='waldur_opennebula.Datastore',
            ),
        ),
        migrations.AlterUniqueTogether(
            name='datastore', unique_together=set([('settings', 'backend_id')]),
        ),
        migrations.AlterUniqueTogether(
            name='CustomerDatastoreNew', unique_together=set([('customer', 'datastore')]),
        ),
    ]
|
987,954 | 5873c89c1a284cd3d4d267cc9be25187e9f2cd24 | # -*- coding: utf-8 -*-
"""Train an XGBoost binary classifier on the labelled credit data and report
the confusion matrix and per-class precision/recall/F1.

Fixes: Python-2 print statements converted to Python 3, and imports moved off
sklearn.cross_validation / sklearn.grid_search (removed in scikit-learn 0.20)
onto sklearn.model_selection — the replacements the file itself had commented
out. Comments translated to English.
"""
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import recall_score, precision_score
# Baseline estimators kept importable for experimentation.
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import RandomizedSearchCV
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
from sklearn.ensemble import GradientBoostingClassifier
import datetime
from sklearn.metrics import classification_report
from xgboost.sklearn import XGBClassifier
from sklearn.decomposition import PCA
from sklearn.metrics import precision_recall_fscore_support

df_All = pd.read_csv("train_1110_LS.csv", sep=',')
# Keep only binary-labelled rows, fill missing features with a -1 sentinel,
# and shuffle before the train/test split.
df_All = df_All[(df_All["label"] == 0) | (df_All["label"] == 1)]
df_All = df_All.fillna(-1)
df_All = shuffle(df_All)
# Drop identifier and target columns from the feature matrix.
df_X = df_All.drop(["certid", "label"], axis=1, inplace=False)
df_y = df_All["label"]
X_train, X_test, y_train, y_test = train_test_split(df_X, df_y, test_size=0.2)
clf = XGBClassifier(learning_rate=0.1, n_estimators=1000, max_depth=5,
                    gamma=0.05, subsample=0.8, colsample_bytree=0.8,
                    objective='binary:logistic', reg_lambda=1, seed=27)
clf.fit(X_train, y_train)
pred = clf.predict(X_test)
cm1 = confusion_matrix(y_test, pred)
print(cm1)
# precision_recall_fscore_support returns (precision, recall, f1, support)
# arrays indexed by class label.
result = precision_recall_fscore_support(y_test, pred)
precision_0 = result[0][0]
recall_0 = result[1][0]
f1_0 = result[2][0]
precision_1 = result[0][1]
recall_1 = result[1][1]
f1_1 = result[2][1]
print("precision_0: ", precision_0, " recall_0: ", recall_0, " f1_0: ", f1_0)
|
987,955 | 5a856534b94b5030e659023d3d8f6b55d64e0c40 | import numpy as np
from standardizeList import standardizeList
# Randomly choose bench players [fr. relative (MP/G)^N]
def benchSelect(bench, num, power):
    """Return the rows of `bench` for `num` randomly chosen players.

    Selection probability is proportional to minutes-per-game raised to
    `power` (normalised via standardizeList); sampling is without
    replacement, and the chosen names are sorted before filtering.
    """
    names = list(bench.Player)
    weights = standardizeList(list(bench.MP_per_game), power)
    picked = sorted(np.random.choice(names, replace=False, size=num, p=weights))
    return bench[bench['Player'].isin(picked)]
987,956 | befac1765b645a3463446f377e67d8a296a8f64c | #var
count= 0
fact=1
#input
num = int(input("Please enter a number:\n"))
#loop
while num > count:
count += 1
fact *= count
#statement
print("The factorial is " +str(fact))
|
987,957 | b3362310290e1cff92d0567722cccd2cccd86fd2 | #!/usr/bin/env python
import curses
import calendar
import gevent
import time
import global_mod as g
import getstr
class BlockViewer(object):
    """Curses pane for browsing blocks and their transaction lists.

    Navigation keys are dispatched through self._keymap; each redraw
    repaints a fixed-size header window plus a scrollable tx window.
    """
    def __init__(self, block_store, window):
        # block_store is expected to provide get_hash(height),
        # get_block(hash) and request_blockheight(height).
        self._block_store = block_store
        self._window = window
        self._mode = None # TODO debug
        # Height currently being browsed; None until the first block arrives.
        self._browse_height = None
        # key -> (handler, *args) dispatch table.
        self._keymap = {
            curses.KEY_DOWN: (self._scroll_down, ),
            curses.KEY_UP: (self._scroll_up, ),
            curses.KEY_HOME: (self._seek, -1000),
            curses.KEY_END: (self._seek, 1000),
            # ord('l'): go_to_latest_block,
            # ord('L'): go_to_latest_block,
            ord('j'): (self._seek, -1),
            ord('J'): (self._seek, -1),
            ord('k'): (self._seek, 1),
            ord('K'): (self._seek, 1),
        }
        self._reset_cursors()

    def _reset_cursors(self):
        # Selection index into the tx list and scroll offset of the view.
        self._cursor = 0
        self._offset = 0

    def on_block(self, block):
        """Adopt the first block's height as the browse position and redraw."""
        if not self._browse_height:
            self._browse_height = block.blockheight
        if self._mode and self._mode == "block":
            self.draw()

    def draw(self):
        """Repaint header and transaction list for the current height."""
        def draw_transactions(block):
            # Render the scrollable tx list below the header.
            # TODO: fix this
            # window_height = state['y'] - 6
            window_height = 10
            win_transactions = curses.newwin(window_height, 75, 5, 0)
            tx_count = len(block.tx)
            bytes_per_tx = block.size // tx_count
            win_transactions.addstr(0, 1, "Transactions: " + ("% 4d" % tx_count + " (" + str(bytes_per_tx) + " bytes/tx)").ljust(26) + "(UP/DOWN: scroll, ENTER: view)", curses.A_BOLD + curses.color_pair(5))
            # reset cursor if it's been resized off the bottom
            if self._cursor > self._offset + (window_height-2):
                self._offset = self._cursor - (window_height-2)
            # reset cursor if the block changed and it's nonsense now
            if self._cursor >= tx_count or self._offset >= tx_count:
                self._reset_cursors()
            offset = self._offset
            for index in range(offset, offset+window_height-1):
                if index < tx_count:
                    if index == self._cursor:
                        win_transactions.addstr(index+1-offset, 1, ">", curses.A_REVERSE + curses.A_BOLD)
                    # Show "..." on the first/last visible row when more rows
                    # exist beyond the viewport in that direction.
                    condition = (index == offset+window_height-2) and (index+1 < tx_count)
                    condition = condition or ( (index == offset) and (index > 0) )
                    if condition:
                        win_transactions.addstr(index+1-offset, 3, "...")
                    else:
                        win_transactions.addstr(index+1-offset, 3, block.tx[index])
            win_transactions.refresh()

        def draw_block(block):
            # Render the 5-line header with the block's summary fields.
            win_header = curses.newwin(5, 75, 0, 0)
            win_header.addstr(0, 1, "height: " + str(block.blockheight).zfill(6) + " (J/K: browse, HOME/END: quicker, L: latest, G: seek)", curses.A_BOLD)
            win_header.addstr(1, 1, "hash: " + block.blockhash, curses.A_BOLD)
            win_header.addstr(2, 1, "root: " + block.merkleroot, curses.A_BOLD)
            win_header.addstr(3, 1, "{} bytes ({} KB)".format(block.size, block.size//1024), curses.A_BOLD)
            win_header.addstr(3, 26, "diff: {:,d}".format(int(block.difficulty)), curses.A_BOLD)
            win_header.addstr(3, 52, time.strftime("%Y-%m-%d %H:%M:%S", time.gmtime(block.time)), curses.A_BOLD)
            win_header.addstr(4, 51, ("v" + str(block.version)).rjust(20), curses.A_BOLD)
            win_header.refresh()

        def draw_no_block():
            # Placeholder header when no block data is loaded for this height.
            win_header = curses.newwin(5, 75, 0, 0)
            win_header.addstr(0, 1, "height: " + str(self._browse_height).zfill(6) + " (no block information loaded)", curses.A_BOLD + curses.color_pair(3))
            win_header.addstr(1, 1, "press 'G' to enter a block hash, height, or timestamp", curses.A_BOLD)
            win_header.refresh()

        self._window.clear()
        self._window.refresh()
        if self._browse_height is not None:
            # TODO: try/except on KeyError here?
            try:
                blockhash = self._block_store.get_hash(self._browse_height)
                block = self._block_store.get_block(blockhash)
            except KeyError:
                draw_no_block()
                return
            draw_block(block)
            draw_transactions(block)
        else:
            draw_no_block()

    def get_selected_txid(self):
        """Return the txid under the cursor, or None when unavailable."""
        if self._browse_height is None:
            return None
        try:
            blockhash = self._block_store.get_hash(self._browse_height)
            block = self._block_store.get_block(blockhash)
        except KeyError:
            return None
        if len(block.tx) <= self._cursor:
            return None
        return block.tx[self._cursor]

    def _seek(self, delta):
        """Move the browse height by delta blocks (clamped at 0) and redraw."""
        if self._browse_height is None:
            return
        new_browse_height = self._browse_height + delta
        if new_browse_height < 0:
            return
        self._reset_cursors()
        self._browse_height = new_browse_height
        try:
            # get_hash is used only to probe whether the block is cached.
            blockhash = self._block_store.get_hash(self._browse_height)
            self.draw()
        except KeyError:
            # Not cached yet: ask the store to fetch it asynchronously.
            self._block_store.request_blockheight(self._browse_height)

    def _seek_back_one(self):
        self._seek(-1)

    def _seek_forward_one(self):
        self._seek(1)

    def _seek_back_thousand(self):
        self._seek(-1000)

    def _seek_forward_thousand(self):
        self._seek(1000)

    def _scroll_down(self):
        """Move the tx cursor down one row, scrolling the view if needed."""
        if self._browse_height is None:
            return
        try:
            blockhash = self._block_store.get_hash(self._browse_height)
            block = self._block_store.get_block(blockhash)
        except KeyError:
            return
        if self._cursor < (len(block.tx) - 1):
            self._cursor += 1
        # NOTE(review): window_height duplicated from draw_transactions —
        # keep the two values in sync.
        window_height = 10
        if (self._cursor - self._offset) > window_height-2:
            self._offset += 1
        self.draw()

    def _scroll_up(self):
        """Move the tx cursor up one row, scrolling the view if needed."""
        if self._browse_height is None:
            return
        if self._cursor > 0:
            if (self._cursor - self._offset) == 0:
                self._offset -= 1
            self._cursor -= 1
        self.draw()

    def handle_hotkey(self, key):
        """Dispatch a key through the keymap.

        Returns True when consumed, False when unmapped, and (implicitly)
        None when not in "block" mode.
        """
        if not self._mode or self._mode != "block":
            return
        if key in self._keymap:
            fn, *args = self._keymap[key]
            fn(*args)
            return True
        return False
|
987,958 | a8287d589bdaf43b03e362184eb2dd3fe6fc6282 | from abc import abstractmethod, ABCMeta
# Stolen from cpython's _collection_abc.py
def _check_methods(C, *methods):
mro = C.__mro__
for method in methods:
for B in mro:
if method in B.__dict__:
if B.__dict__[method] is None:
return NotImplemented
break
else:
return NotImplemented
return True
class Updateable(metaclass=ABCMeta):
    """Structural ABC: any class with an update() method is Updateable."""
    __slots__ = ()

    @abstractmethod
    def update(self):
        raise NotImplementedError

    @classmethod
    def __subclasshook__(cls, subclass):
        if cls is not Updateable:
            return NotImplemented
        return _check_methods(subclass, "update")
class Drawable(metaclass=ABCMeta):
    """Structural ABC: any class with a draw() method is Drawable."""
    __slots__ = ()

    @abstractmethod
    def draw(self):
        raise NotImplementedError

    @classmethod
    def __subclasshook__(cls, subclass):
        if cls is not Drawable:
            return NotImplemented
        return _check_methods(subclass, "draw")
class Collidable(metaclass=ABCMeta):
    """Structural ABC: any class exposing a collision_box attribute."""
    __slots__ = ()

    @property
    @abstractmethod
    def collision_box(self):
        return None

    @classmethod
    def __subclasshook__(cls, subclass):
        if cls is not Collidable:
            return NotImplemented
        return hasattr(subclass, "collision_box")
|
987,959 | fb14a569c127111dca1caec1a8813d20af2602a1 | import critic
import performance_system
import experiment_generator
import generalizer
from random import uniform
'''
This module used for train management and study with the other modules
'''
import ast  # needed to parse a saved weight vector back into a list of floats

# Interactive menu loop: train a new weight vector or play with a saved one.
# NOTE: end_main is never set to True; option 3 leaves via exit().
end_main = False
while not end_main:
    print("--------------")
    print("Welcome to manager view")
    print("1. Create and train new weight")
    print("2. Play game using weight")
    print("3. Exit")
    inp = int(input('Enter your choice: '))
    if inp == 1:
        num_of_trains = int(input('Enter number of trains: '))
        print("Creates a starting vector with random values")
        # 31-component weight vector, uniformly random in [-5, 5].
        w = []
        for i in range(31):
            w.append(uniform(-5.0, 5.0))
        print("Starting vector is: ")
        print(w)
        if input("Do you want to save start vector in file before running the learning process(y\\n)? ") == "y":
            with open(input("Enter file name: ")+".txt", "w")as f:
                f.write(str(w))
            print("File saved")
        print("Start lerning process")
        # Self-play training loop: generate a board, play, build a training
        # set with the critic, and update the weights by LMS.
        for i in range(num_of_trains):
            s_b = experiment_generator.get_exp(1)
            m = performance_system.play_game_against_himself(s_b, 2, w)
            ts = critic.make_train_set(m, w, 1)
            w = generalizer.LMS_update(ts, w)
            if i % 100 == 0:
                print("update: " + str(100*(i/num_of_trains)) + "%")
        print("Done!")
        print(w)
        # FIX: prompt used "(y\n)" which printed a real newline; escape it
        # to match the earlier save prompt.
        if input("Do you want to save the vector(y\\n)? ") == "y":
            with open(input("Enter file name: ")+".txt", "w")as f:
                f.write(str(w))
            print("File saved")
        print("Finish training, return to main menu")
    elif inp == 2:
        w = []
        with open(input("Enter file name: ")+".txt", "r")as f:
            # FIX: the file holds str(list-of-floats); list(f.read()) split it
            # into individual characters. Parse it back into real numbers.
            w = ast.literal_eval(f.read())
        print("Weight loaded")
        print("Start game, have fun!")
        performance_system.play_game_against_user(1, w)
        print("Finish game, return to main menu")
    else:
        exit()
|
987,960 | 0bee34fb247bfb780bfec0b95a83cb3d56b0af20 | from pyparsing import oneOf, Literal, Word, Optional, Combine, delimitedList, MatchFirst, CaselessLiteral
from ...util.grammar import *
def define_encode():
    """Grammar for the ENCODE clause: the keyword plus an optional option list."""
    keyword = CaselessLiteral("encode").setResultsName('encode')
    return keyword + Optional(_define_encode_options())
def _define_encode_options():
    """Parenthesized, comma-separated option list for the ENCODE clause.

    Supports `strategy = '<name>'` (required quote-wrapped strategy) and an
    optional `persist = '<value>'`.
    """
    # strategy = '<one of the known strategies>'
    strategy = ((CaselessLiteral('strategy') + Literal('=')).suppress()
                + Quote
                + MatchFirst(_define_encode_strategies()).setResultsName('encodeStrategy')
                + Quote)
    # persist = '<value>' (optional)
    persist = Optional((CaselessLiteral('persist') + Literal('=')).suppress()
                       + Quote + Word(everythingWOQuotes).setResultsName('encodePersist') + Quote)
    option = MatchFirst([strategy, persist])
    return openParen + delimitedList(option, delim=',') + closeParen
def _define_encode_strategies():
    """Accepted encode strategies, tried in order: regular, then one-hot."""
    return [CaselessLiteral("regular"), CaselessLiteral("one-hot")]
|
987,961 | 73fc2b26f5d8cc96f39615e0e0773368f2fd399a | from __future__ import absolute_import
import os
try:
    from setuptools import setup
except ImportError:
    from distutils.core import setup

PACKAGE_PATH = os.path.abspath(os.path.dirname(__file__))

# Long description comes straight from the README.
# NOTE(review): open(..., encoding=...) is Python 3 only, which conflicts
# with the __future__ absolute_import / distutils fallback above — confirm
# the supported interpreter range.
with open(os.path.join(PACKAGE_PATH, 'README.md'), encoding='utf-8') as fp:
    readme = fp.read()

setup(
    name='accessdb',
    packages=['accessdb'],
    # NOTE(review): version is 0.0.1 but download_url points at the 0.1
    # archive — verify which is intended.
    version='0.0.1',
    description='Fast way to create Access Database',
    long_description=readme,
    long_description_content_type='text/markdown',
    author='Dhana Babu',
    author_email='dhana36.m@gmail.com',
    url='https://github.com/dhanababum/accessdb',
    download_url='https://github.com/dhanababum/accessdb/archive/0.1.tar.gz',
    keywords=['python', 'accessdb', 'text'],
    classifiers=[],
)
|
987,962 | 983649d57abc6d4a886bbe803091550263b91c30 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
"""
Created on Tru Jun 3 15:13:37 2020
@author: Robinson Montes
"""
import json
def save_to_json_file(my_obj, filename):
    """
    Serialize an object to JSON text and write it to a file.

    Arguments:
        my_obj (obj): the object to serialize
        filename (str): path of the output file
    Return:
        The number of characters written.
    """
    text = json.dumps(my_obj)
    with open(filename, 'w', encoding='utf-8') as out:
        return out.write(text)
|
987,963 | d705603b2672b12c41bcb655713e779237f81a76 | import re
# Demonstrate grouped regex capture on a "name DDD-DDDD-DDDD" phone entry.
# (Removed the unused `make_list` variable from the original.)
p = re.compile(r"(\w+)\s+(\d+)[-](\d+)[-](\d+)")
m = p.search("park 010-1234-5678")
print(m)
print(m.group(0))  # whole match
print(m.group(1))  # name
print(m.group(2))  # area/prefix
print(m.group(3))  # middle digits
print(m.group(4), '- *******')
# Build a masked string from the last group.
h = m.group(4)+'- ********'
print(h)
|
987,964 | f4ae0e6ddab2557d2ae539b3eef6fc93ee211653 | from django.contrib import admin
from .models import Service
class ServiceModelAdmin(admin.ModelAdmin):
    """Admin changelist for Service showing pricing-related columns."""
    # NOTE(review): "profit" is presumably a field/method on Service — confirm.
    list_display = ("service", "price", "cost", "duration", "profit")
admin.site.register(Service, ServiceModelAdmin)
|
987,965 | 0d026efa6b13a5844dc61ad818ae4dea3171bf0a | def partition(a, l, r):
#가장 왼쪽 요소를 피봇으로
pivot = a[l]
i = l
j = r
while i < j:
#피봇보다 큰 요소 찾기
while a[i] <= pivot:
i += 1
#빼먹으면 안 됨
if(i == r): break
#피봇보다 작은 요소 찾기
while a[j] >= pivot :
j -= 1
#빼먹으면 안 됨
if(j == l): break
#찾은 큰 요소와 작은 요소 자리바꾸기
if i < j :
a[i], a[j] = a[j], a[i]
#피봇과 j요소 바꾸기(피봇기준으로 왼쪽은 작은 요소들이, 오른쪽은 큰 요소들이 위치하게 됨)
arr[l], arr[j] = arr[j], arr[l]
#j자리에 위치한 피봇 반환
return j
def quicksort(a, low, high):
    """Sort a[low..high] in place by recursive partitioning."""
    if low >= high:
        return
    split = partition(a, low, high)
    quicksort(a, low, split - 1)
    quicksort(a, split + 1, high)
import sys
# Redirect stdin so input() reads the test cases from a local file.
sys.stdin = open("quick_sort.txt")
for tc in range(int(input())):
    N = int(input())
    arr = list(map(int, input().split()))
    # sort the original in place
    quicksort(arr, 0, len(arr)-1)
    # Report the middle element of the sorted array for each test case.
    print("#{} {}".format(tc+1, arr[N//2]))
987,966 | e315f3fb6fa8178a45519569e8caaab7b3c922ad | import numpy as np
from scipy.integrate import odeint
from equats import traj_and_speed, w_rel_and_quat
import matplotlib.pyplot as plt
from calculate_w_ref import w_ref, w_ref_dif, determineAEP
from ext_moments import mom_gravit
from utils import rotation_to_connected
# Simulation horizon and gravitational parameter nu.
t_0, nu = 0, 6.809e15
t_final = 500000
t = np.linspace(0, t_final, t_final + 1)  # for Neptune
# Initial position and velocity of the spacecraft
# (presumably meters and m/s — TODO confirm units).
r0 = 1.0e+07 * np.array([1.715573066233160, 2.289071031477311, 0.892592547997959])
v0 = 1.0e+04 * np.array([1.701250362854970, -1.101411738217119, -0.469847228124423])
r_and_v0 = np.concatenate([r0, v0])
# Orbit parameters (semi-major axis, eccentricity, parameter) from the state.
a, e, p = determineAEP(r0, v0, nu)
# Integrate translational motion, then split the history into r and v.
r_and_v = odeint(traj_and_speed, r_and_v0, t, args=(nu,))
r, v = np.zeros((t.size, 3)), np.zeros((t.size, 3))
r[:], v[:] = r_and_v[:, :3], r_and_v[:, 3:]
# Inertia tensors; total = j_tenzor_c + a_t * j_tenzor_t
# (NOTE(review): the physical meaning of the a_t scaling is not shown here).
j_tenzor_t = np.array([[3348, 0, 0],
                       [0, 1836, 0],
                       [0, 0, 4548]])
j_tenzor_c = np.array([[6216, 0, 0],
                       [0, 6582, 0],
                       [0, 0, 5509]])
a_t = 0.73
j_tenzor = j_tenzor_c + a_t * j_tenzor_t
# Initial relative angular velocity (zero) and attitude quaternion: a 5 degree
# rotation about the z axis, scalar part last.
init_angle = 5 * np.pi / 180
w_rel_init, quat0 = np.array([0, 0, 0]), np.array([0, 0, np.sin(init_angle / 2), np.cos(init_angle / 2)])
w_rel_and_quat0 = np.concatenate([w_rel_init, quat0])
# Controller gains (angular-velocity and quaternion feedback).
k_w, k_q = 3, 0.04
# Integrate the rotational dynamics.
# FIX: the result used to be assigned back to the name `w_rel_and_quat`,
# shadowing the imported right-hand-side function and breaking the second
# odeint() call later in this script. Store the solution separately.
w_rel_and_quat_hist = odeint(w_rel_and_quat, w_rel_and_quat0, t, args=(j_tenzor, k_w, k_q))
w_rel, quat = np.zeros((t.size, 3)), np.zeros((t.size, 4))
w_rel[:], quat[:] = w_rel_and_quat_hist[:, :3], w_rel_and_quat_hist[:, 3:]
# Reconstruct, per time step: absolute angular velocity, angular momentum,
# the feedback control torque M_ctrl, and the reference angular velocity.
M_ctrl, w_abs, mom_imp = np.zeros((t.size, 3)), np.zeros((t.size, 3)), np.zeros((t.size, 3))
for i in range(0, t.size):
    # absolute = relative + reference rotated into the body frame
    w_abs[i] = w_rel[i] + rotation_to_connected(quat[i], w_ref(p, t[i], t_0, r[i]))
w_ref_t = np.zeros((t.size, 3))
for i in range(0, t.size):
    mom_imp[i] = np.matmul(j_tenzor, w_abs[i])
    # Control torque: gravity-gradient compensation + gyroscopic terms +
    # reference-motion feedforward - proportional feedback on w_rel and quat.
    M_ctrl[i] = -mom_gravit(quat[i], r[i], j_tenzor, nu) + np.cross(w_abs[i], mom_imp[i]) \
        - np.matmul(j_tenzor, np.cross(w_rel[i], rotation_to_connected(quat[i], w_ref(p, t[i], t_0, r[i])))) \
        + np.matmul(j_tenzor, rotation_to_connected(quat[i], w_ref_dif(r[i], v[i], p, t[i], t_0))) \
        - k_w * w_rel[i] - k_q * quat[i, :3]
    w_ref_t[i] = w_ref(p, t[i], t_0, r[i])
# --- Diagnostic plots for the first gain set ---
plt.plot(t / 3600, M_ctrl[:, 0], label='M_ctrl_x')
plt.plot(t / 3600, M_ctrl[:, 1], label='M_ctrl_y')
plt.plot(t / 3600, M_ctrl[:, 2], label='M_ctrl_z')
plt.legend(loc='best')
plt.title('График зав-ти управляющего момента M_ctrl от времени')
plt.xlabel('t, час')
plt.ylabel('M_ctrl, Н*м')
plt.grid()
plt.show()
plt.plot(t / 3600, w_rel[:, 0], label='w_rel_x')
plt.plot(t / 3600, w_rel[:, 1], label='w_rel_y')
plt.plot(t / 3600, w_rel[:, 2], label='w_rel_z')
plt.legend(loc='best')
plt.title('График зав-ти w_rel от времени')
plt.xlabel('t, час')
plt.ylabel('w_rel, c^-1')
plt.grid()
plt.show()
plt.plot(t / 3600, quat[:, 0], label='quat_x')
plt.plot(t / 3600, quat[:, 1], label='quat_y')
plt.plot(t / 3600, quat[:, 2], label='quat_z')
plt.plot(t / 3600, quat[:, 3], label='quat_scalar')
plt.legend(loc='best')
plt.title('График зав-ти quat от времени')
plt.xlabel('t, час')
plt.ylabel('quat')
plt.grid()
plt.show()
plt.plot(t / 3600, w_abs[:, 0], label='w_abs_x')
plt.plot(t / 3600, w_abs[:, 1], label='w_abs_y')
plt.plot(t / 3600, w_abs[:, 2], label='w_abs_z')
plt.legend(loc='best')
plt.title('График зав-ти w_abs от времени')
plt.xlabel('t, час')
plt.ylabel('w_abs, c^-1')
plt.grid()
plt.show()
#
plt.plot(t / 3600, w_ref_t[:, 0], label='w_ref_x')
plt.plot(t / 3600, w_ref_t[:, 1], label='w_ref_y')
plt.plot(t / 3600, w_ref_t[:, 2], label='w_ref_z')
plt.legend(loc='best')
plt.title('График зав-ти w_ref от времени')
plt.xlabel('t, час')
plt.ylabel('w_ref, c^-1')
plt.grid()
plt.show()
# equation for the reaction-wheel angular momentum
# (abandoned odeint-based attempt, kept for reference)
# def h_machs(y, t):
#     h_mach = y[:3]
#     dh_dt = np.zeros(3)
#     # print('mom_ctrl_check = ', M_ctrl[int(t / t_final * t_len - 1), :])
#     # print('cross = ', np.cross(w_abs[int(t / t_final * t_len - 1), :], h_mach))
#     print(int(t / t_final * t_len) - 1)
#     dh_dt[:] = -M_ctrl[int(t / t_final * t_len) - 1, :] - np.cross(w_abs[int(t / t_final * t_len) - 1, :], h_mach)
#     return dh_dt
#
#
#
# h_mach_init = np.zeros(3)
#
# h_mach = odeint(h_machs, h_mach_init, t)
# t_len = t.size
# Integrate the wheel angular momentum with explicit Euler:
# dh/dt = -M_ctrl - w_abs x h.
t_len, dt = t.size, t[1] - t[0]
h_mach_init = np.zeros(3)
H_mach = np.zeros((t_len, 3))
h_mach = h_mach_init
for i in range(0, t.size):
    dhdt = -M_ctrl[i] - np.cross(w_abs[i], h_mach)
    h_mach = h_mach + dhdt * dt
    H_mach[i][:] = h_mach
plt.plot(t / 3600, H_mach[:, 0], label='h_mach_x')
plt.plot(t / 3600, H_mach[:, 1], label='h_mach_y')
plt.plot(t / 3600, H_mach[:, 2], label='h_mach_z')
plt.legend(loc='best')
plt.title('График зав-ти h_mach от времени')
plt.xlabel('t, час')
plt.ylabel('H_mach, м^2·кг/с')
plt.grid()
plt.show()
# Wheel torque is the (negative) finite-difference derivative of H_mach.
moment_machoviks = np.zeros((t.size, 3))
for i in range(1, t.size):
    moment_machoviks[i] = -(H_mach[i] - H_mach[i - 1]) / dt
plt.plot(t / 3600, moment_machoviks[:, 0], label='moment_machoviks_x')
plt.plot(t / 3600, moment_machoviks[:, 1], label='moment_machoviks_y')
plt.plot(t / 3600, moment_machoviks[:, 2], label='moment_machoviks_z')
plt.legend(loc='best')
plt.title('График зав-ти момента, создаваемого маховиками, от времени')
plt.xlabel('t, час')
plt.ylabel('moment_machoviks, H*m')
plt.grid()
plt.show()
# --- Second run: different inertia tensor, zero-rotation initial attitude,
# and a new gain pair ---
j_tenzor = np.array([[3348, 0, 0],
                     [0, 1836, 0],
                     [0, 0, 4548]])
# k_w_and_q = [[0.7, 0.01], [0.65, 0.008], [0.75, 0.02], [0.6, 0.02]]
w_rel_init, quat0 = np.array([0, 0, 0]), np.array([0, 0, 0, 1])
w_rel_and_quat0 = np.concatenate([w_rel_init, quat0])
# k_w, k_q = 0.025, 0.0015
k_w, k_q = 1.2, 0.01
# FIX: keep the solution under its own name instead of clobbering the
# imported right-hand-side function `w_rel_and_quat` (from equats); the
# original rebinding made this second odeint() call receive an ndarray
# as the callable and crash at runtime.
w_rel_and_quat_hist2 = odeint(w_rel_and_quat, w_rel_and_quat0, t, args=(j_tenzor, k_w, k_q))
w_rel, quat = np.zeros((t.size, 3)), np.zeros((t.size, 4))
w_rel[:], quat[:] = w_rel_and_quat_hist2[:, :3], w_rel_and_quat_hist2[:, 3:]
# Recompute absolute angular velocity, control torque and reference rate
# for the second configuration (same formulas as the first run).
M_ctrl, w_abs, mom_imp = np.zeros((t.size, 3)), np.zeros((t.size, 3)), np.zeros((t.size, 3))
for i in range(0, t.size):
    w_abs[i] = w_rel[i] + rotation_to_connected(quat[i], w_ref(p, t[i], t_0, r[i]))
w_ref_t = np.zeros((t.size, 3))
for i in range(0, t.size):
    mom_imp[i] = np.matmul(j_tenzor, w_abs[i])
    M_ctrl[i] = -mom_gravit(quat[i], r[i], j_tenzor, nu) + np.cross(w_abs[i], mom_imp[i]) \
        - np.matmul(j_tenzor, np.cross(w_rel[i], rotation_to_connected(quat[i], w_ref(p, t[i], t_0, r[i])))) \
        + np.matmul(j_tenzor, rotation_to_connected(quat[i], w_ref_dif(r[i], v[i], p, t[i], t_0))) \
        - k_w * w_rel[i] - k_q * quat[i, :3]
    w_ref_t[i] = w_ref(p, t[i], t_0, r[i])
# plots for the second configuration
plt.plot(t / 3600, M_ctrl[:, 0], label='M_ctrl_x')
plt.plot(t / 3600, M_ctrl[:, 1], label='M_ctrl_y')
plt.plot(t / 3600, M_ctrl[:, 2], label='M_ctrl_z')
plt.legend(loc='best')
plt.title('График зав-ти управляющего момента M_ctrl от времени')
plt.xlabel('t, час')
plt.ylabel('M_ctrl, Н*м')
plt.grid()
plt.show()
# plt.plot(t / 3600, w_rel[:, 0], label='w_rel_x')
# plt.plot(t / 3600, w_rel[:, 1], label='w_rel_y')
# plt.plot(t / 3600, w_rel[:, 2], label='w_rel_z')
# plt.legend(loc='best')
# plt.title('График зав-ти w_rel от времени')
# plt.xlabel('t, час')
# plt.ylabel('w_rel, c^-1')
# plt.grid()
# plt.show()
# plt.plot(t / 3600, quat[:, 0], label='quat_x')
# plt.plot(t / 3600, quat[:, 1], label='quat_y')
# plt.plot(t / 3600, quat[:, 2], label='quat_z')
# plt.plot(t / 3600, quat[:, 3], label='quat_scalar')
# plt.legend(loc='best')
# plt.title('График зав-ти quat от времени')
# plt.xlabel('t, час')
# plt.ylabel('quat')
# plt.grid()
# plt.show()
plt.plot(t / 3600, w_abs[:, 0], label='w_abs_x')
plt.plot(t / 3600, w_abs[:, 1], label='w_abs_y')
plt.plot(t / 3600, w_abs[:, 2], label='w_abs_z')
plt.legend(loc='best')
plt.title('График зав-ти w_abs от времени')
plt.xlabel('t, час')
plt.ylabel('w_abs, c^-1')
plt.grid()
plt.show()
plt.plot(t / 3600, w_ref_t[:, 0], label='w_ref_x')
plt.plot(t / 3600, w_ref_t[:, 1], label='w_ref_y')
plt.plot(t / 3600, w_ref_t[:, 2], label='w_ref_z')
plt.legend(loc='best')
plt.title('График зав-ти w_ref от времени')
plt.xlabel('t, час')
plt.ylabel('w_ref, c^-1')
plt.grid()
plt.show()
# equation for the reaction-wheel angular momentum
# (abandoned odeint-based attempt, kept for reference)
# def h_machs(y, t):
#     h_mach = y[:3]
#     dh_dt = np.zeros(3)
#     # print('mom_ctrl_check = ', M_ctrl[int(t / t_final * t_len - 1), :])
#     # print('cross = ', np.cross(w_abs[int(t / t_final * t_len - 1), :], h_mach))
#     print(int(t / t_final * t_len) - 1)
#     dh_dt[:] = -M_ctrl[int(t / t_final * t_len) - 1, :] - np.cross(w_abs[int(t / t_final * t_len) - 1, :], h_mach)
#     return dh_dt
#
#
#
# h_mach_init = np.zeros(3)
#
# h_mach = odeint(h_machs, h_mach_init, t)
# t_len = t.size
# Euler integration of the wheel momentum (same scheme as the first run).
t_len, dt = t.size, t[1] - t[0]
h_mach_init = np.zeros(3)
H_mach = np.zeros((t_len, 3))
h_mach = h_mach_init
for i in range(0, t.size):
    dhdt = -M_ctrl[i] - np.cross(w_abs[i], h_mach)
    h_mach = h_mach + dhdt * dt
    H_mach[i][:] = h_mach
plt.plot(t / 3600, H_mach[:, 0], label='h_mach_x')
plt.plot(t / 3600, H_mach[:, 1], label='h_mach_y')
plt.plot(t / 3600, H_mach[:, 2], label='h_mach_z')
plt.legend(loc='best')
plt.title('График зав-ти h_mach от времени')
plt.xlabel('t, час')
plt.ylabel('H_mach, м^2·кг/с')
plt.grid()
plt.show()
|
987,967 | da177da7c0f53cc62187f50e170a006e4d4778a8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# @Desc :
# @Time : 2020-11-14 22:14:22
# @Author : Lydia
# @Site :
# @File : sort.py
# @Software: PyCharm
if __name__ == '__main__':
    # sorted() returns a new list; list.sort() sorts in place and returns None.
    words = ['grape', 'raspberry', 'apple', 'banana']
    print(sorted(words))                      # sorted copy
    print(words)                              # original untouched
    print(sorted(words, reverse=True))
    print(sorted(words, key=len))
    print(sorted(words, key=len, reverse=True))
    print(words.sort())                       # in-place sort returns None
    print(words)                              # now sorted
987,968 | ab67c951d869d4da7798881e84ba5ded393f6f0d | import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
import plotly.graph_objects as go
from dash.dependencies import Output, Input
from app import app
from utils.organization_chart import oc
from utils.chorus_dt_handler import ch
from components.html_components import build_figure_container, build_card_indicateur
from components.figures_templates import xaxis_format
# TODO: move make figure function to chorus_dt_components.py in components
def get_donut_by_prestation_type(code_structure=None):
    """
    Render and update a donut figure to show emissions distribution by prestation type
    """
    # Load chorus dt data based on chosen code_structure
    # TODO: improve and standardize data import logic
    df = ch.get_structure_data(code_structure)
    # Total distance per prestation type feeds the donut slices.
    by_type = df.groupby(["prestation_type"])["distance"].sum().reset_index()
    donut = go.Pie(labels=by_type.prestation_type, values=by_type["distance"], hole=0.3)
    fig = go.Figure(data=[donut])
    fig.update_layout(plot_bgcolor="white", template="plotly_white", margin={"t": 30, "r": 30, "l": 30})
    return fig
def get_emissions_timeseries(code_structure=None):
    """
    Render and update a line figure showing emissions evolution over time.
    """
    # Load chorus dt data based on chosen code_structure
    # TODO: improve and standardize data import logic
    df = ch.get_structure_data(code_structure)
    df["year_month"] = df["date_debut_mission"].dt.to_period("M")
    # Aggregate total distance per month.
    monthly = df.groupby(["year_month"])["distance"].sum().reset_index()
    trace = go.Scatter(
        x=monthly["year_month"].astype(str),
        y=monthly["distance"].values,
        mode="lines+markers",
        line=dict(width=3),
    )
    fig = go.Figure()
    fig.add_trace(trace)
    fig.update_layout(
        plot_bgcolor="white", template="plotly_white", margin={"t": 30, "r": 30, "l": 30}, xaxis=xaxis_format
    )
    return fig
# Prestation-type filter: train vs plane.
select_prestation_type = dcc.Dropdown(
    id="select-prestation_type", options=[{"label": "Train", "value": "T"}, {"label": "Avion", "value": "A"}]
)

# Top row of KPI cards (values are placeholders for now).
cards = dbc.CardDeck(
    [
        build_card_indicateur("Nombre de trajets", "Nombre de trajets", "2 300"),
        build_card_indicateur("Emissions (eCO2)", "Emissions (eCO2)", "2M"),
        build_card_indicateur("Indicateur X", "Indicateur X", "XX"),
        build_card_indicateur("Indicateur Y", "Indicateur Y", "YY"),
    ]
)

# Page layout: left column = filters/export/help, right column = KPIs +
# donut chart, full-width bottom row = time series.
layout = html.Div(
    [
        dbc.Row(html.P("", id="values-selected")),
        # Cards row
        dbc.Row(
            [
                dbc.Col(
                    [
                        dbc.Card(
                            dbc.CardBody(
                                [
                                    html.H3("Filtres"),
                                    html.Br(),
                                    dbc.FormGroup([dbc.Label("Type de prestation"), select_prestation_type]),
                                ]
                            ),
                            className="pretty_container",
                        ),
                        dbc.Card(
                            dbc.CardBody(
                                [html.H3("Exporter les données"), html.Br(), dbc.Button("Export", id="export")]
                            ),
                            className="pretty_container",
                        ),
                        dbc.Jumbotron("Explications sur les graphiques et leur fonctionnement..."),
                    ]
                ),
                dbc.Col(
                    [
                        cards,
                        build_figure_container(
                            title="Répartition des émissions par type de déplacement",
                            id="donut-by-prestation",
                            footer="Explications..",
                        ),
                    ],
                    width=9,
                ),
            ]
        ),
        dbc.Row(
            [
                dbc.Col(
                    [
                        build_figure_container(
                            title="Évolution temporelles des émissions",
                            id="timeseries-chorus-dt",
                            footer="Explications..",
                        )
                    ],
                    width=12,
                )
            ]
        ),
    ],
    id="div-data-chorus-dt",
)
@app.callback(Output("timeseries-chorus-dt", "figure"), [Input("dashboard-selected-entity", "children")])
def update_emissions_timeseries(selected_entity):
    """Refresh the emissions time series when the selected entity changes."""
    entity = oc.get_entity_by_id(selected_entity)
    return get_emissions_timeseries(entity.code_chorus)
@app.callback(Output("donut-by-prestation", "figure"), [Input("dashboard-selected-entity", "children")])
def update_donut_by_prestation(selected_entity):
    """Refresh the prestation-type donut when the selected entity changes."""
    entity = oc.get_entity_by_id(selected_entity)
    return get_donut_by_prestation_type(entity.code_chorus)
|
987,969 | 381cffbf3d3cb6e16bef880e1d8f88410fcfbc6c | #basic functions for ML
#updated from basic.py 22nd September
#IMPORTS
from collections import defaultdict as ddict, OrderedDict as odict
from typing import Any, Dict, List
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from rdkit.Chem import PandasTools, AllChem as Chem, Descriptors
from rdkit.ML.Descriptors.MoleculeDescriptors import MolecularDescriptorCalculator
from rdkit.Chem.Descriptors import MolWt
from sklearn.ensemble import RandomForestRegressor
from sklearn.metrics import mean_absolute_error, r2_score, mean_squared_error
from sklearn.model_selection import KFold, train_test_split
from sklearn.neural_network import MLPRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVR
import sklearn
import torch
import deepchem as dc
import copy
from tqdm import tqdm
import delfos as d
class Model:
    """
    Object containing a model and all its associated parameters.

    name: display name, e.g. "MPNN with attention"
    model: a torch module or an sklearn regressor
    model_type: "torch" or anything else (treated as sklearn downstream)
    data_type: which dataset variant the model consumes —
        "SMILES", "descriptors", "ECFP" or "sentences"
    lr / optimiser / num_epochs / batch_size: torch training settings,
        only stored when model_type == 'torch'
    """
    def __init__(self, name, model, model_type, data_type,
                 lr=1e-3, optimiser=torch.optim.Adam, num_epochs=100, batch_size=32):
        self.name = name #e.g. "MPNN with attention"
        self.model = model #torch/sklearn regressor object
        self.model_type = model_type
        self.data_type = data_type #"SMILES" or "descriptors" or "ECFP" or "sentences"
        # Accumulates experiment records (populated elsewhere).
        self.experiments = []
        #torch specific variables
        if self.model_type == 'torch':
            self.lr = lr
            self.optimiser = optimiser
            self.batch_size = batch_size
            self.num_epochs = num_epochs
def data_maker(solute, solvent, pka, ids=None):
    """Build the four featurized dataset variants from solute/solvent SMILES.

    Parameters
    ----------
    solute, solvent : list of SMILES strings (parallel lists)
    pka : list of target values, parallel to the SMILES lists
    ids : optional list of indices; when given, all three lists are
        subset to those indices first

    Returns
    -------
    dict with keys "ECFP", "descriptors", "SMILES", "sentences", each a
    [inputs, targets] pair (numpy arrays for the fingerprint/descriptor
    sets, paired sequences + torch tensors for the others).
    """
    # Idiom fix: `ids == None` with an empty pass-branch replaced by `is None`.
    if ids is not None:
        solute, solvent, pka = ([seq[x] for x in ids] for seq in (solute, solvent, pka))
    # ECFP (circular fingerprint) features, solute and solvent concatenated.
    featurizer = dc.feat.CircularFingerprint(size=512, radius=3)
    sol = featurizer.featurize(solute)
    solv = featurizer.featurize(solvent)
    ECFP_data = [np.concatenate((sol, solv), axis=1), np.array(pka)]
    # RDKit physicochemical descriptors.
    featurizer = dc.feat.RDKitDescriptors()
    sol = featurizer.featurize(solute)
    solv = featurizer.featurize(solvent)
    desc_data = [np.concatenate((sol, solv), axis=1), np.array(pka)]
    # Raw SMILES pairs for message-passing models.
    SMILES_pairs = [(solute[i], solvent[i]) for i in range(len(solute))]
    SMILES_data = [SMILES_pairs, torch.Tensor(pka)]
    # mol2vec-style "sentence" embeddings for RNN models.
    sentence_pairs = d.delfos_data(solute, solvent)
    sentence_data = [sentence_pairs, torch.Tensor(pka)]
    # Collate all variants under their dataset-type names.
    datasets = dict(ECFP=ECFP_data,
                    descriptors=desc_data,
                    SMILES=SMILES_data,
                    sentences=sentence_data)
    return datasets
class pka_scaler:
    """Wrapper around sklearn's StandardScaler that accepts either numpy
    arrays or torch tensors and returns the same container type."""
    def __init__(self, pka):
        self.scaler = sklearn.preprocessing.StandardScaler()
        # Fit on a column vector regardless of the input container.
        if type(pka) == np.ndarray:
            pka = pka.reshape(-1,1)
        else:
            pka = pka.detach().numpy().reshape(-1,1)
        self.scaler.fit(pka)
    def transform(self, targets):
        """Standardize targets; output container matches the input's."""
        if type(targets) == np.ndarray:
            targets = targets.reshape(-1,1)
            transformed_targets = self.scaler.transform(targets)
            return transformed_targets.ravel()
        else:
            # NOTE(review): the tensor branch does not reshape, so it
            # assumes `targets` is already 2-D (e.g. .view(-1, 1)) — confirm.
            targets = targets.detach().numpy()
            transformed_targets = self.scaler.transform(targets)
            return torch.Tensor(transformed_targets)
    def inverse_transform(self, targets):
        """Undo standardization; output container matches the input's."""
        if type(targets) == np.ndarray:
            targets = targets.reshape(-1,1)
            transformed_targets = self.scaler.inverse_transform(targets)
            return transformed_targets.ravel()
        else:
            # Same 2-D tensor assumption as transform() above.
            targets = targets.detach().numpy()
            transformed_targets = self.scaler.inverse_transform(targets)
            return torch.Tensor(transformed_targets)
class Dataset(torch.utils.data.Dataset):
    """
    Index-mapped dataset for torch loaders and regressors.

    Parameters
    ----------
    list_IDs : list, np.array
        Indices (into datapoints/labels) selected for this split.
    datapoints : List
        (solute, solvent) pairs — SMILES strings for MP models or
        tensors for RNN models.
    labels : torch.Tensor
        Target values, parallel to datapoints.
    """
    def __init__(self, list_IDs, datapoints, labels):
        self.list_IDs = list_IDs
        self.datapoints = datapoints
        self.labels = labels

    def __len__(self):
        # Size of the split, not of the underlying data.
        return len(self.list_IDs)

    def __getitem__(self, index):
        # Translate a split-local index into a dataset-global one.
        sample_id = self.list_IDs[index]
        return self.datapoints[sample_id], self.labels[sample_id]
def collate_double(batch):
    '''
    Collate (solute, solvent) pairs plus targets into one batch.

    Parameters
    ----------
    batch: List = [(X,y)]
        List of (solute, solvent) pairs with their target value.

    Returns
    -------
    [sol_batch, solv_batch, targets]: List
        Lists of SMILES strings, or padded tensors for sentence data,
        plus a 1-D tensor of targets.
    '''
    targets = torch.Tensor([sample[1].item() for sample in batch])
    if type(batch[0][0][0]) == str:
        # SMILES data: pass the raw strings through.
        sol_batch = [sample[0][0] for sample in batch]
        solv_batch = [sample[0][1] for sample in batch]
    else:
        # Sentence data: pad variable-length sequences into dense tensors.
        sol_batch = torch.nn.utils.rnn.pad_sequence(
            [torch.Tensor(sample[0][0]) for sample in batch])
        solv_batch = torch.nn.utils.rnn.pad_sequence(
            [torch.Tensor(sample[0][1]) for sample in batch])
    return [sol_batch, solv_batch, targets]
def double_loader(data, indices, batch_size=64):
    '''
    Build a torch DataLoader over double (solute, solvent) inputs.

    Parameters
    ----------
    data : List = [(sol,solv),pka]
        Full dataset of (solute, solvent) pairs and target values.
    indices : list, np.array
        Indices of the samples to include.
    batch_size : int
        Number of samples per batch.

    Returns
    -------
    loader : torch.utils.data.DataLoader
        Unshuffled, batched loader using collate_double.
    '''
    return torch.utils.data.DataLoader(
        Dataset(indices, data[0], data[1]),
        batch_size=batch_size,
        shuffle=False,
        collate_fn=collate_double,
    )
def train(model, ids, data, scaler, datasets):
    """
    Train a model.

    Parameters
    ----------
    model : Model
        Regressor wrapper; model.model_type selects torch vs sklearn training.
    ids : list, np.array
        Indices for training samples.
    data : List = [(sol,solv),pka]
        Data of (solute, solvent) pairs and target values.
    scaler : pka_scaler
        Target standardizer applied before fitting.
    datasets : dict
        Output of data_maker; only datasets['SMILES'] is read here, to
        obtain each sample's solvent for the train/validation split.

    Returns
    -------
    regressor : Any
        Trained regressor model (a deep copy / clone; model.model is untouched).
    """
    if model.model_type == 'torch':
        # Hold out 20% of the ids for validation-based early stopping.
        solvent = [datasets['SMILES'][0][x][1] for x in ids]
        train_ids, val_ids, _, _ = train_test_split(ids, solvent, test_size=0.2, random_state=1)
        train_loader = double_loader(data, train_ids, batch_size=model.batch_size)
        val_loader = double_loader(data, val_ids, batch_size=len(val_ids))
        # Train a copy so the template model stays untrained.
        regressor = copy.deepcopy(model.model)
        optimiser = model.optimiser(regressor.parameters(), lr=model.lr)
        loss_function = torch.nn.MSELoss()
        early_stopping = EarlyStopping(patience=10)
        for epoch in range(model.num_epochs):
            #train
            for (sol,solv,targets) in train_loader:
                targets = targets.view(-1,1)
                targets = scaler.transform(targets)
                optimiser.zero_grad()
                outputs = regressor(sol,solv)
                loss = loss_function(outputs, targets)
                loss.backward()
                optimiser.step()
            #evaluate (single full-size batch, so one pass sets val_loss)
            for (sol,solv,targets) in val_loader:
                targets = targets.view(-1,1)
                targets = scaler.transform(targets)
                outputs = regressor(sol,solv)
                loss = loss_function(outputs, targets)
                val_loss = loss.item()
            #early stopping
            early_stopping.store(val_loss, regressor)
            if early_stopping.stop:
                #print("Stopping at epoch "+str(epoch))
                break
        # Restore the best checkpoint written by EarlyStopping.
        regressor.load_state_dict(torch.load('checkpoint.pt'))
    else:
        # sklearn path: clone, standardize targets, fit.
        regressor = sklearn.base.clone(model.model)
        targets = scaler.transform(data[1][ids])
        regressor.fit(data[0][ids], targets)
    return regressor
class EarlyStopping:
    """Stop training once validation loss has not improved for more than
    `patience` consecutive checks; checkpoints the best model seen."""

    def __init__(self, patience=10):
        self.patience = patience
        self.best_loss = 1e6
        self.steps = 0
        self.stop = False

    def store(self, loss, net):
        """Record one validation loss; save `net` when it is a new best."""
        if loss < self.best_loss:
            self.best_loss = loss
            self.steps = 0
            torch.save(net.state_dict(), 'checkpoint.pt')
            return
        self.steps += 1
        if self.steps > self.patience:
            self.stop = True
def test(model, regressor, ids, data, scaler):
    """
    Evaluate a trained regressor on the samples selected by ids.

    Parameters
    ----------
    model : Model
        Regressor model wrapper; model.model_type selects the branch.
    regressor : Any
        Specific trained regressor to evaluate.
    ids : list, np.array
        Indices of the test samples.
    data : List = [(sol,solv),pka]
        Data of (solute,solvent) pairs and target values.
    scaler : Any
        Target scaler; predictions are inverse-transformed before scoring.

    Returns
    -------
    results : list
        [MAE, RMSE] on the selected samples.
    """
    if model.model_type == 'torch':
        # one batch holding every test sample
        batch_loader = double_loader(data, ids, batch_size=len(ids))
        for (sol, solv, targets) in batch_loader:
            predictions = scaler.inverse_transform(regressor(sol, solv))
            targets = targets.detach().numpy()
            predictions = predictions.detach().numpy()
    else:
        predictions = scaler.inverse_transform(regressor.predict(data[0][ids]))
        targets = data[1][ids]
    return [mae(targets, predictions), rmse(targets, predictions)]
def predict(model, experiment, data):
    """Predict targets for every sample in data using a stored experiment.

    Returns the (targets, predictions) pair, both as numpy arrays for the
    torch branch.
    """
    ids = list(range(len(data[0])))
    if model.model_type == 'torch':
        # one batch holding the whole dataset
        batch_loader = double_loader(data, ids, batch_size=len(ids))
        for (sol, solv, targets) in batch_loader:
            predictions = experiment['scaler'].inverse_transform(experiment['model'](sol, solv))
            targets = targets.detach().numpy()
            predictions = predictions.detach().numpy()
    else:
        predictions = experiment['scaler'].inverse_transform(experiment['model'].predict(data[0][ids]))
        targets = data[1][ids]
    return targets, predictions
def CV_fit(model, data, datasets, folds=5, random_state: int = None):
    """
    Build a cross-validated regressor consisting of k-models.

    Parameters
    ----------
    model : torch_model / sklearn_model
        Regressor model. [stores trained CV models]
    data : List = [(sol,solv),pka]
        Full dataset of (solute,solvent) pairs and target values.
    datasets : dict
        Full dataset dictionary, forwarded to train().
    folds : int
        Number of folds for cross validation.
    random_state : int
        Integer to use for seeding the k-fold split. When given, the
        split is shuffled (sklearn only accepts random_state together
        with shuffle=True).

    Returns
    -------
    avg_result : List
        List of average MAE and RMSE.
    results : List
        List of lists of MAE and RMSE for each fold.
    """
    # sklearn's KFold raises ValueError if random_state is set while
    # shuffle=False, so only shuffle (and seed) when a seed is supplied.
    shuffle = random_state is not None
    kf = KFold(n_splits=folds, shuffle=shuffle, random_state=random_state)
    kf = kf.split(X=data[0])
    # Fit k models and store them
    results = []
    for train_ids, test_ids in kf:
        scaler = pka_scaler(data[1][train_ids])
        if model.data_type == 'descriptors':
            # NOTE(review): this re-fits and re-applies the scaler to
            # data[0] in place on every fold, so later folds see
            # repeatedly transformed features — confirm whether that
            # compounding is intended before changing it.
            desc_scaler = StandardScaler()
            desc_scaler.fit(data[0][train_ids])
            data[0] = desc_scaler.transform(data[0])
        fold_model = train(model, train_ids, data, scaler, datasets)
        fold_result = test(model, fold_model, test_ids, data, scaler)
        results.append(fold_result)
    avg_result = np.mean(results, axis=0)
    return avg_result, results
def fit(model, data, test_ids, exp_name, datasets):
    """
    Fits a model according to the given test_ids and data.

    Parameters
    ----------
    model : torch_model / sklearn_model
        Regressor model; the trained model is appended to model.experiments.
    data : List = [(sol,solv),pka]
        Full dataset of (solute,solvent) pairs and target values.
    test_ids : list, np.array
        Selected test set indices; every other index is used for training.
    exp_name : str
        Name stored with the experiment record.
    datasets : dict
        Full dataset dictionary, forwarded to train().

    Returns
    -------
    results : List
        [MAE, RMSE] on the held-out test set.
    """
    # torch data is a list of pairs; sklearn data is an array
    if model.model_type == 'torch':
        size = len(data[0])
    else:
        size = data[0].shape[0]
    train_ids = [i for i in range(size) if i not in test_ids]
    # target scaler is fitted on training targets only
    scaler = pka_scaler(data[1][train_ids])
    if model.data_type == 'descriptors':
        desc_scaler = StandardScaler()
        desc_scaler.fit(data[0][train_ids])
        # NOTE(review): this mutates the caller's data in place; predict()
        # appears to rely on data already being transformed — confirm
        # before refactoring to a local copy.
        data[0] = desc_scaler.transform(data[0])
    trained_model = train(model, train_ids, data, scaler, datasets)
    results = test(model, trained_model, test_ids, data, scaler)
    model.experiments.append({'name':exp_name,'model':trained_model, 'results':results, 'scaler':scaler})
    return results
#RESULTS HELPERS
def rmse(y_true, y_pred):
    """Root-mean-squared error helper.

    Implemented with numpy: the `squared=False` keyword of
    sklearn.metrics.mean_squared_error is deprecated and removed in newer
    sklearn releases, so this avoids it while producing the same value.
    """
    diff = np.asarray(y_true) - np.asarray(y_pred)
    return np.sqrt(np.mean(diff ** 2))
def mae(y_true, y_pred):
    """Mean absolute error helper."""
    return np.mean(np.abs(np.asarray(y_true) - np.asarray(y_pred)))
#HYPERPARAMETER OPTIMISATION
from timeit import default_timer as timer
from hyperopt import STATUS_OK, Trials, fmin, tpe
class fitness:
    """
    For conducting cross validation on a model with a given set of
    hyperparameters for optimisation.

    Parameters
    ----------
    model_dict : dict
        Key word arguments to be fed into a b.Model class.
    model_param_names : List
        Hyperparameter names specific to the regressor model.
    training_param_names : List
        Hyperparameter names specific to training.
    datasets : dict
        Full dataset dictionary keyed by data type.
    """
    def __init__(self, model_dict, model_param_names, training_param_names, datasets):
        self.m = model_dict
        self.model_param_names = model_param_names
        self.training_param_names = training_param_names
        self.datasets = datasets

    def objective(self, params):
        """
        Objective function for bayesian hyperparameter optimisation.

        Parameters
        ----------
        params : dict
            Specific set of model and training hyperparameters for testing.

        Returns
        -------
        dict
            Results of CV testing, including MAE loss, runtime and the
            original parameter list.
        """
        model_params = {name: params[name] for name in self.model_param_names}
        training_params = {name: params[name] for name in self.training_param_names}
        # remember the un-instantiated regressor class so it can be restored
        model_cls = self.m['model']
        self.m['model'] = model_cls(**model_params)
        self.m.update(training_params)
        model = Model(**self.m)
        data = self.datasets[model.data_type]
        start = timer()
        res, full_res = CV_fit(model, data, self.datasets)
        run_time = timer() - start
        loss = res[0]
        # restore the class for the next evaluation
        self.m['model'] = model_cls
        return {'loss': loss, 'params': params, 'run_time': run_time, 'status': STATUS_OK}
def hyperopt_func(model_dict, model_param_names, training_param_names, param_space, datasets, max_evals=30):
    """
    Bayesian hyperparameter optimisation function.

    Parameters
    ----------
    model_dict : dict
        Key word arguments to be fed into a b.Model class.
    model_param_names : List
        Hyperparameter names specific to the regressor model.
    training_param_names : List
        Hyperparameter names specific to training.
    param_space : dict
        Distribution of choices for each hyperparameter to be optimised.
    datasets : dict
        Full dataset dictionary, forwarded to the fitness objective.
    max_evals : int
        Maximum number of evaluations of hyperparameter sets.

    Returns
    -------
    results : list
        Results from each evaluation of the objective function, sorted from best to worst result.
    """
    tester = fitness(model_dict, model_param_names, training_param_names, datasets)
    trials = Trials()
    timer_start = timer()
    # fixed rstate seed (50) makes the TPE search reproducible
    best = fmin(fn=tester.objective,
                space=param_space,
                algo=tpe.suggest,
                max_evals=max_evals,
                trials=trials,
                rstate=np.random.RandomState(50))  # NOTE(review): hyperopt >= 0.2.7 expects np.random.default_rng here — confirm installed version
    timer_end = timer()
    print('Total training time (min):',(timer_end-timer_start)/60)
    # ascending loss: best hyperparameter set first
    results = sorted(trials.results, key = lambda x: x['loss'])
    return results
987,970 | 0a37fab81efaba77abd85832a2aa0f8b0e60ef52 | #!/usr/bin/env python3
# Read each CSV row of (name, age) and rewrite it as formatted text.
with open('/home/kami/Projetos/Cod3r/Manipulação_Arquivo/pessoas.csv') as arquivo, \
        open('/home/kami/Projetos/Cod3r/Manipulação_Arquivo/pessoas.txt', 'w') as saida:
    for registro in arquivo:
        pessoa = registro.strip().split(',')
        print('Nome: {}, Idade: {}'.format(*pessoa), file=saida)

# both handles are closed automatically once the with-block exits
if arquivo.closed:
    print('Arquivo ja foi fechado')
if saida.closed:
    print('O arquivo de saida foi fechado')
|
987,971 | 47012baf23787de2eba489754d4c1a83da13b5f7 | from django.db import models
# Create your models here.
class Comment(models.Model):
    """A user comment attached to a title, newest first."""
    user_name = models.CharField(max_length=20)
    titleId = models.IntegerField()
    date_time = models.DateTimeField(auto_now_add=True)  # set once on creation
    content = models.CharField(max_length=200, null=True, blank=True)

    def __str__(self):
        return self.user_name

    class Meta:
        # most recent comments come first
        ordering = ['-date_time']
987,972 | 60ca389028e84c41a83fe1e1a5ee0cf8ab1a149c | from sys import stdin, stdout
def solve(s, p, totals):
    """Count totals achievable with a best score >= p, given that at most
    s of them may come from a 'surprising' triple (two scores differing
    by exactly 2)."""
    matched = 0
    for total in totals:
        surprising = False
        ordinary = False
        for low in range(11):
            for delta in (-2, -1, 0, 1, 2):
                mid = low + delta
                high = total - (low + mid)
                # triple must be valid scores with pairwise gaps <= 2
                if mid < 0 or high < 0 or high > 10:
                    continue
                if abs(high - mid) > 2 or abs(high - low) > 2:
                    continue
                if high >= p or low >= p or mid >= p:
                    if delta in (-2, 2) or abs(high - low) == 2 or abs(high - mid) == 2:
                        # needs a gap of 2 somewhere -> only valid as a surprise
                        surprising = True
                    else:
                        ordinary = True
                        surprising = False
                        break
            if ordinary:
                matched += 1
                break
        # fall back to spending one of the s allowed surprises
        if surprising and s > 0:
            s -= 1
            matched += 1
    return matched
# Read the case count, then one line per case: first token is ignored
# (the number of totals), followed by s, p and the totals themselves.
line_count = int(stdin.readline())
for case_no in range(line_count):
    tokens = [int(t) for t in stdin.readline()[:-1].split()]
    answer = solve(tokens[1], tokens[2], tokens[3:])
    print("Case #" + str(case_no + 1) + ": " + str(answer))
|
987,973 | 11495ddbc466e5bcc9fa1693abf256c86a9981d8 | import numpy as np
from matplotlib import pyplot as plt
import random
import sys
def processFile(filename, cat, sup_output=False, verbose=False):
    """Load a .npy bitmap file for category `cat` and optionally report
    how many examples it contains."""
    if verbose:
        print("filename", filename)
    images = np.load(filename)
    if not sup_output:
        print('Number of', cat, 'images: ', images.shape[0])
    return images
def reshapeImages(list_images, verbose=False):
    """Reshape each flat (n, 784) bitmap array into an (n, 28, 28) cube."""
    reshaped = []
    for flat in list_images:
        if verbose:
            print(flat.shape)
        cube = flat.reshape((flat.shape[0], 28, 28))
        if verbose:
            print(cube.shape)
        reshaped.append(cube)
    return reshaped
def loadUpData(cat, sup_output=False):
    """Load the bitmap .npy file for every category name in `cat`."""
    return [
        processFile('./data/full_numpy_bitmap_' + category + '.npy', category, sup_output)
        for category in cat
    ]
def random_sample(list_cat_imgs, num_samples, cat_english_labels, sup_output=False, verbose=False):
    """Uniformly subsample `num_samples` rows from every category array.

    Returns the input unchanged when any category has too few examples.
    Sampling is seeded (seed 1) per category, so results are reproducible.
    """
    if sup_output:
        verbose = False
    # the sample size must not exceed the smallest category
    counts = [cat.shape[0] for cat in list_cat_imgs]
    if num_samples > min(counts):
        if not sup_output:
            print("too many samples and not enough examples")
        return list_cat_imgs
    # TODO: If ou have time change the list structure to a dict so you don't
    #       have to use the positional index to associate labels with data
    sampled = []
    for pos, cat in enumerate(list_cat_imgs):
        n_examples = cat.shape[0]
        if not sup_output:
            print('Take', num_samples, 'samples from', n_examples, 'of', cat_english_labels[pos] + 's')
        # seeded for reproducibility; remove the seed for fresh draws
        random.seed(1)
        idx = random.sample(range(n_examples), num_samples)
        rows = cat[idx, :]
        sampled.append(rows)
        if verbose:
            print("index from samples")
            print(idx)
            print("samples drawn")
            print(rows)
            print(rows.shape)
    return sampled
def squish(uniform_list):
    """Stack per-category image arrays (all with the same example count)
    into one contiguous (total, 28, 28) int array."""
    per_cat = uniform_list[0].shape[0]
    total = per_cat * len(uniform_list)
    stacked = np.zeros(shape=(total, 28, 28), dtype=int)
    for pos, cat in enumerate(uniform_list):
        stacked[pos * per_cat:(pos + 1) * per_cat, :, :] = cat
    return stacked
def expand_labels(num_cat, samples):
    """Return the label vector [0]*samples + [1]*samples + ... for
    num_cat classes, matching the layout produced by squish()."""
    return np.repeat(np.arange(num_cat, dtype=int), samples)
def print_shapes_list(lis):
    """Print the shape of every array in `lis` (debug helper)."""
    for arr in lis:
        print(arr.shape)
    return
def main(sup_outs=False):
    """Assemble the quickdraw dataset: load, subsample, reshape, stack
    and label; returns (X, y, categories)."""
    categories = ['cannon','eye', 'face', 'nail', 'pear','piano','radio','spider','star','sword']
    samples = 10000
    raw_images = loadUpData(categories, sup_output=True)
    sampled = random_sample(raw_images, num_samples=samples, cat_english_labels=categories, sup_output=False)
    reshaped = reshapeImages(sampled)
    # print_shapes_list(reshaped)
    X = squish(reshaped)
    y = expand_labels(len(categories), samples)
    return X, y, categories


if __name__ == "__main__":
    main()
|
987,974 | 86094677eedbbd5b214df98462d2ffc7823afd5f | #!/usr/bin/env python3
"""
Obsolete: script that assigns clades to sequences based on clade designations in `defaults/clades.tsv`
"""
import numpy as np
import argparse, sys, os
from Bio import AlignIO, SeqIO, Seq, SeqRecord
from Bio.AlignIO import MultipleSeqAlignment
from augur.translate import safe_translate
from augur.align import run as augur_align
from augur.clades import read_in_clade_definitions, is_node_in_clade
from augur.utils import load_features
class alignArgs:
    """Attribute bag mimicking the argparse namespace that augur.align.run expects."""
    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
class tmpNode(object):
    """Minimal stand-in for a tree node: just holds a sequences dict."""
    def __init__(self):
        self.sequences = dict()
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Assign clades to sequences",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    # exactly one of --sequences (unaligned) / --alignment (pre-aligned) is required
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--sequences", help="*unaligned* FASTA file of SARS-CoV-2sequences")
    group.add_argument("--alignment", help="*aligned* FASTA file of SARS-CoV-2 sequences relative to Wuhan-HU-1 with insertions removed")
    parser.add_argument("--output", type=str, default='clade_assignment.tsv', help="tsv file to write clade definitions to")
    parser.add_argument("--keep-temporary-files", action='store_true', help="don't clean up")
    parser.add_argument("--chunk-size", default=10, type=int, help="process this many sequences at once")
    parser.add_argument("--nthreads", default=1, type=int, help="Number of threads to use in alignment")
    args = parser.parse_args()
    refname = f"defaults/reference_seq.gb"
    features = load_features(refname)
    if args.sequences:
        # unaligned input: iterate lazily, chunks are aligned below
        seqs = SeqIO.parse(args.sequences, 'fasta')
    else:
        alignment = SeqIO.parse(args.alignment, 'fasta')
    ref = SeqIO.read(refname, 'genbank')
    clade_designations = read_in_clade_definitions(f"defaults/clades.tsv")
    # temporary files used by the chunked alignment step
    log_fname = "clade_assignment.log"
    in_fname = "clade_assignment_tmp.fasta"
    out_fname = "clade_assignment_tmp_alignment.fasta"
    output = open(args.output, 'w')
    print('name\tclade\tparent clades', file=output)
    # break the sequences into chunks, align each to the reference, and assign clades one-by-one
    done = False
    while not done:
        # if not aligned, align
        if args.sequences:
            # generate a chunk with chunk-size sequences
            chunk = []
            while len(chunk)<args.chunk_size and (not done):
                try:
                    seq = seqs.__next__()
                    chunk.append(seq)
                except StopIteration:
                    done = True
            print(f"writing {len(chunk)} and the reference sequence to file '{in_fname}' for alignment.")
            with open(in_fname, 'wt') as fh:
                SeqIO.write(chunk, fh, 'fasta')
            # align the chunk against the reference with mafft via augur
            aln_args = alignArgs(sequences=[in_fname], output=out_fname, method='mafft',
                                 reference_name=None, reference_sequence=refname,
                                 nthreads=args.nthreads, remove_reference=False,
                                 existing_alignment=False, debug=False, fill_gaps=False)
            augur_align(aln_args)
            alignment = AlignIO.read(out_fname, 'fasta')
        else:
            # pre-aligned input: a single pass over the whole alignment
            done = True
        for seq in alignment:
            if seq.id==ref.id:
                continue
            if len(seq.seq)!=len(ref.seq):
                # NOTE(review): leftover debugger breakpoint — remove for production use
                import ipdb; ipdb.set_trace()
                print(f"ERROR: this file doesn't seem aligned to the reference. {seq.id} as length {len(seq.seq)} while the reference has length {len(ref.seq)}.")
                sys.exit(1)
            # read sequence and all its annotated features
            seq_container = tmpNode()
            seq_str = str(seq.seq)
            seq_container.sequences['nuc'] = {i:c for i,c in enumerate(seq_str)}
            for fname, feat in features.items():
                if feat.type != 'source':
                    seq_container.sequences[fname] = {i:c for i,c in enumerate(safe_translate(feat.extract(seq_str)))}
            # for each clade, check whether it matches any of the clade definitions in the tsv
            matches = []
            for clade_name, clade_alleles in clade_designations.items():
                if is_node_in_clade(clade_alleles, seq_container, ref):
                    matches.append(clade_name)
            # print the last match as clade assignment and all others as ancestral clades
            # note that this assumes that clades in the tsv are ordered by order of appearence.
            # furthermore, this will only work if parent clades don't have definitions that exclude
            # child clades, i.e. positions can only be additive for this to work.
            if matches:
                print(f"{seq.description}\t{matches[-1]}\t{', '.join(matches[:-1])}", file=output)
            else:
                print(f"{seq.description}\t -- \t", file=output)
    output.close()
    if not args.keep_temporary_files:
        for fname in [log_fname, in_fname, out_fname]:
            if os.path.isfile(fname): #won't exist if didn't align
                os.remove(fname)
|
987,975 | 7c951bb9f97d3862664c63641a9d3b664e10f73c | # -*- coding: utf-8 -*-
import json
class OCSSSearchRunner:
    """
    This runner replays searches from an OCSS search logfile against elastic.
    """
    # parsed log entries, loaded lazily on the first call (class-level cache)
    search_data = []

    def initialize(self, params):
        # the target index must be given as a plain string
        index = params.get("index")
        if type(index) is not str:
            raise RuntimeError from None
        self.index = index
        source = params.get("source-file")
        if type(source) is not str:
            print("ERROR no source data file given, or wrong format", end=". ")
            raise RuntimeError from None
        # load the search data once; each line of the file is a JSON record
        if self.search_data is None or len(self.search_data) < 1:
            with open(source) as json_file:
                for line in json_file:
                    self.search_data.append(json.loads(line))

    async def __call__(self, es, params):
        self.initialize(params=params)
        # perform one logged search and report how long elastic took
        entry = self.search_data.pop()
        response = await es.search(body=entry["query"], index=self.index)
        return response["took"], "ms"

    def __repr__(self, *args, **kwargs):
        return "ocss-search"
|
987,976 | d8b19aac459c94d33cc7d53dc36cc5f6398c0068 | Python 3.4.0 (default, Apr 11 2014, 13:05:18)
[GCC 4.8.2] on linux
Type "copyright", "credits" or "license()" for more information.
>>> def func(x):
if x%3==0:
print(x,"is odd")
>>> func(9)
9 is odd
>>> def func(x):
if x%2==0:
print(x,"iseven")
else:
print(x,"is odd")
>>> func(9)
9 is odd
>>> func(4)
4 iseven
>>>
|
987,977 | dbec6cc718a939cc52bcaf6b01b8a2649491e51c | # This script tests the corpusreader
# Useage: python test_corpusreader.py > test_corpusreader_output.txt
# The results will be printed to test_corpusreader_output.txt
import sys
import os

# make the project root importable before pulling in ml_ner
sys.path.append(os.path.join(os.path.dirname(__file__), '../..'))

import pprint

from ml_ner.corpus.corpusreader import CorpusReader

# Read the OntoNotes WSJ development annotations
reader = CorpusReader("/resources/corpora/multilingual/ontonotes-5.0-conll-2012/conll-2012/v4/data/development/data/english/annotations/nw/wsj")

# Extract the labeled named entities and pretty-print them
named_entities = reader.extract_labeled_named_entities()
pprint.pprint(named_entities)
|
987,978 | f074e30cb07a661408f87eeb9492800bda6240cd | from rest_framework.response import Response
from rest_framework.decorators import api_view
from graphs.v2.serializers import *
from datetime import datetime
import queries
# Fallback query parameters used when the client omits them
# (timestamps use the "%Y-%m-%d-%H-%M-%S-%f" format parsed below).
mock_data = {
    'process_type': 17,
    'start': '2018-01-01-08-00-00-000000',
    'end': '2018-01-31-08-00-00-000000',
}
@api_view(['GET'])
def production_actuals(request):
    """Return serialized production output, bucketed over the requested window.

    Query params: process_type, product_types, start, end, bucket — each
    falling back to mock_data defaults (bucket defaults to 'month').
    """
    query = request.GET
    process_type = query.get('process_type', mock_data['process_type'])
    product_types = query.get('product_types', None)
    start = get_date_from_string(query.get('start', mock_data['start']))
    end = get_date_from_string(query.get('end', mock_data['end']))
    bucket_size = query.get('bucket', 'month')
    rows = queries.get_output_by_bucket(bucket_size, start, end, process_type, product_types)
    return Response(ProductionActualsSerializer(rows, many=True).data)
def get_date_from_string(date):
    """Parse a '%Y-%m-%d-%H-%M-%S-%f' string into an aware UTC datetime.

    Bug fix: the original called pytz.utc.localize() but pytz was never
    imported, so every request raised NameError. For UTC, attaching
    timezone.utc directly is equivalent and needs no third-party package.
    """
    from datetime import timezone  # local import: module top only imports datetime
    dateformat = "%Y-%m-%d-%H-%M-%S-%f"
    return datetime.strptime(date, dateformat).replace(tzinfo=timezone.utc)
|
987,979 | 2ba4f85c1af458025af9b38a1422d7e6be09b1c6 | #Chapter 20.17
# INSERTION OF A NODE IN A DOUBLY LINKED LIST
class dnode:
    """Doubly linked list node: an integer value plus left/right pointers."""
    def __init__(self):
        self.data = 0
        self.left = None
        self.right = None
def insert(p, q, n):
    """Append a node holding n to a doubly linked list.

    Parameters: p is the head (None for an empty list), q is the tail.
    Returns the updated (head, tail) pair.

    Cleanup: the original checked `dnode() == None` and used bare `exit`
    statements — a Python constructor can never return None, and `exit`
    without parentheses is a no-op expression, so both were dead code.
    """
    node = dnode()
    node.data = n
    if p is None:
        # empty list: the new node is both head and tail
        p = node
        q = node
    else:
        # link behind the current tail
        node.left = q
        q.right = node
        q = node
    return p, q
def printfor(p): # print nodes
    # Walk head-to-tail printing each value.
    # NOTE(review): `print(...),` relies on the Python 2 print statement's
    # trailing comma (suppress newline); under Python 3 it builds a 1-tuple
    # instead — this module appears to target Python 2 (see raw_input in main).
    print("The values in the forward order are")
    while(p != None):
        print(str(p.data) + str("\t")),
        p = p.right
    print("\n")
def nodecount(p): # count number of nodes
    """Return the number of nodes reachable from p via .right links."""
    total = 0
    node = p
    while node is not None:
        total += 1
        node = node.right
    return total
def newinsert(p, node_no, value): # Insert new node
    # Insert a node with `value` after position `node_no` (0 = new head).
    # Returns the (possibly new) head.
    if(node_no < 0):
        print("Specified node does not exist")
        exit()
    if(node_no > nodecount(p)):
        print("Specified node does not exist")
        exit()
    if(node_no == 0):
        # insert before the current head
        temp = dnode()
        if(temp == None):
            # NOTE(review): dead branch — a constructor never returns None
            print("Error cannot allocate")
            exit()
        temp.data = value
        temp.right = p
        p = temp
    else:
        # walk to the node_no-th node
        temp = p
        i = 1
        while(i < node_no):
            i = i + 1
            temp = temp.right
        print("calue at node here" + str(temp.data))
        temp1 = dnode()
        if(temp1 == None):
            print("Error cannot allocate")
            exit  # NOTE(review): bare `exit` is a no-op expression, not a call
        # splice temp1 between temp and temp.right
        temp1.data = value
        temp1.right = temp.right
        temp1.left = temp
        temp1.left.right = temp1
        if(temp1.right != None):
            temp1.right.left = temp1
    return(p)
def main():
    # Interactive driver: build a list, then insert one extra node.
    # NOTE(review): uses raw_input, so this module targets Python 2.
    start = None
    end = None
    print("Enter the nodes to be created")
    n = int(raw_input())
    while(n > 0):
        print("Enter the data value to be placed")
        x = int(raw_input())
        start, end = insert(start, end, x)
        n = n - 1
    print(nodecount(start))
    print("Enter the node number after which new node to be placed")
    n = int(raw_input())
    print("Enter the data to be placed in the node")
    x = int(raw_input())
    start = newinsert(start, n, x)
    printfor(start)
main()
|
987,980 | 0bdd75bbdedf6eea57836fa1677223084779aa86 | #!/usr/bin/env python
import hackercodecs
from pwn import *
def str_or(a, b):
    """Character-wise bitwise OR of two strings, truncated to the shorter one."""
    return ''.join(chr(ord(x) | ord(y)) for x, y in zip(a, b))
# Python 2 CTF solver: the `.decode('bin')` codec comes from the
# hackercodecs import (binary-string -> bytes), and `xor` from pwntools.
# Part 1: OR the three RGB channel bitstreams together to reveal text.
red = open('red').read().strip().replace('\n', '').decode('bin')
green = open('green').read().strip().replace('\n', '').decode('bin')
blue = open('blue').read().strip().replace('\n', '').decode('bin')
print str_or(str_or(red, blue), green)
# Part 2: a hard-coded binary message, decoded the same way.
m2 = '''
01001100011010010110011101101000
00110111010111110100110001100101
01110110011001010110110000110010
00101110011100000110100001110000
'''
print m2.strip().replace('\n', '').decode('bin')
# Part 3: XOR the three CMY channel bitstreams together.
cyan = open('cyan').read().strip().replace('\n', '').decode('bin')
magenta = open('magenta').read().strip().replace('\n', '').decode('bin')
yellow = open('yellow').read().strip().replace('\n', '').decode('bin')
print xor(xor(cyan, magenta), yellow)
987,981 | 7b3415185339f8b69492c29e050d01dc2e76ef47 | #!/usr/bin/env python3
'''Simple HTTP Server With Upload.
This module builds on BaseHTTPServer by implementing the standard GET
and HEAD requests in a fairly straightforward manner.
see: https://gist.github.com/UniIsland/3346170
'''
__version__ = "0.1"
__all__ = ["simple-py3httpd"]
__author__ = "woody"
__home_page__ = "https://github.com/hyz/"
import os, sys, io
import re
#import posixpath
import http.server
import urllib.request, urllib.parse, urllib.error
import cgi
import shutil
import mimetypes
import html
import subprocess
# Sink file object: handlers return this after having written a response.
Null = open(os.devnull) #_Empty()
# Plain-text dump of the most recently uploaded spreadsheet (search target).
_xls_txt = 'sheet.txt'
# External xls-to-text converter invoked after each upload.
_xlsprint = '../bin/xlsprint'
class Part(object):
    """One section of a multipart/form-data body.

    Raw CRLF-terminated lines are fed in one at a time via parse():
    header lines come first, a bare CRLF switches to body collection,
    and end() flushes the final body line with its trailing CRLF stripped.
    """
    #'Content-Disposition' #'application/octet-stream'
    def __init__(self):
        self.name = None       # form field name, once the disposition header is seen
        self.filename = None   # client-supplied filename, if any
        self.body = None       # list of raw body lines; None while still in headers
        self._last = None      # most recent body line, held back to trim the final CRLF

    def parse(self, line):
        """Consume one raw line (bytes) of this part."""
        if self.body is not None:
            # body mode: commit the held line, hold the new one
            if self._last:
                self.body.append(self._last)
            self._last = line
        elif line == b'\r\n':
            # blank line terminates the header section
            self.body = []
        else:
            self._parse_header(line)

    def end(self):
        """Finish the part: strip CRLF from the held final line and keep it if non-empty."""
        if self._last:
            assert self._last.endswith(b'\r\n')
            tail = self._last[:-2]
            if tail:
                self.body.append(tail)
        del self._last
        return self

    def __str__(self):
        return str(self.__dict__)

    def _parse_header(self, line):
        """Pull name/filename out of a Content-Disposition header line."""
        m = re.match(b'^Content-Disposition.*name="(.*)"; filename="(.*)"\r\n$', line)
        if m:
            self.name = html.unescape(m.group(1).decode())
            self.filename = html.unescape(m.group(2).decode())
def multipart(rfile, content_type):
    """Yield Part objects parsed from a multipart/form-data request body."""
    # first body line is the boundary marker itself
    boundary = rfile.readline().strip()
    m = re.match(r'^multipart/form-data;\s*boundary=(.*)$', content_type)
    if not m:
        raise ValueError('Content_Type invalid')
    # boundary lines in the body may carry extra leading/trailing dashes
    if not re.match(b'^-*' + m.group(1).encode() + b'-*$', boundary):
        raise ValueError('multipart/form-data boundary mismatch')
    part = Part()
    for line in rfile:
        if line.startswith(boundary):
            # a boundary closes the current part; start collecting the next
            part.end()
            yield part
            part = Part()
        part.parse(line)  # boundary lines hit the fresh part's header parser, where they match nothing
class SimpleHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
server_version = "SimpleHTTPWithUpload/" + __version__
#parameter_list ::=
# (defparameter ",")*
# ( "*" [parameter] ("," defparameter)* [, "**" parameter]
# | "**" parameter
# | defparameter [","] )
def print(self, *args, **kwargs):
sep = kwargs.pop('sep', '')
file = kwargs.pop('file', self.wfile)
__builtins__.print(*args, sep=sep, file=file, **kwargs)
def format_html(self, word, th, rows):
def print_row(row, file=self.wfile):
print('<tr>', file=file)
for i,col in enumerate(row):
print(('<td>','<td align="right">')[i==2], file=file)
idx = col.rfind(word)
if idx >= 0:
print(col[0:idx], '<font color="#990012"><u>', word, '</u></font>', col[idx+len(word):], file=file, sep='')
#print(col[0:idx], '<u>', word, '</u>', col[idx+len(word):], file=file, sep='')
else:
print(col, file=file)
print('</td>', file=file)
print('</tr>', file=file)
#print('<li><a href="%s">%s</a>' % (urllib.parse.quote(linkname), cgi.escape(displayname)), file=f)
with io.StringIO() as f:
print('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">', file=f)
print('<HTML>', '<title>Search %s</title>' % word, '<BODY>', file=f)
#print('<h2></h2>' % displaypath, '<hr/>', file=f)
print('<FORM method="GET" action="/?">', file=f)
print('<INPUT type="text" name="k" value="%s"/>' % word, file=f)
print('<INPUT type="submit" value="search"/>', file=f)
print('</FORM>', file=f)
print('<HR>', '<TABLE border="1" cellspacing="0">', file=f)
print_row(th, file=f)
for row in rows:
print_row(row, file=f)
print('</TABLE>', '<HR>', file=f)
print('<FORM ENCTYPE="multipart/form-data" method="POST">', file=f)
print('<INPUT name="file" type="file"/>', file=f)
print('<INPUT type="submit" value="upload"/>', file=f)
print('</FORM>', file=f)
print("</BODY>", "</HTML>", file=f)
utf8 = f.getvalue().encode()
return utf8
def grep(self, word):
#out, _ = subprocess.Popen(['grep', word, _xls_txt], stdout=subprocess.PIPE).communicate()
with open(_xls_txt) as f:
th = f.readline().split('\t')
assert len(th) > 2
rows, tmps, pos = [], [], 0
for i,line in enumerate(f,1): #out.decode().split('\n'): #str(out, 'UTF-8')
r = line.split('\t')
if len(r) != len(th):
continue
if r[2].endswith(word):
# kpos2 = len(r[2]) - r[2].rfind(word)
# if kpos2 < len(r[2]): # or len(r[6]) - r[6].rfind(word) <= len(r[6]):
# rows.append(r)
rows += tmps
tmps = []
pos = i+4
rows.append(r)
elif i <= pos:
rows.append(r)
else:
if len(tmps) >= 3:
tmps.pop(0)
tmps.append(r)
# rows.sort(key=lambda r: len(r[2])-r[2].rfind(word))#(, reverse=True)
return (th, rows)
def do_GET(self):
'''Serve a GET request.'''
path = self.real_path()
if self.path.startswith('/?') and self.querys:
k = self.querys.get('k').strip()
if k:
utf8 = self.format_html(k, *self.grep(k))
self.send_response(200)
self.send_header("Content-type", "text/html; charset=UTF-8")
self.send_header("Content-Length", str(len(utf8)))
#self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
self.wfile.write(utf8)
return
self._get_headers(path)
def do_HEAD(self):
'''Serve a HEAD request.'''
self._get_headers(self.real_path())
def do_POST(self):
with io.StringIO() as f: # io.BytesIO()
suc, info = self._post_file()
suc = ('Failed','Success')[int(suc)]
print('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">', file=f)
print('<HTML>', '<TITLE>Upload Result Page</TITLE>', '<BODY>', file=f)
print('<h2>Upload Result Page</h2>', file=f)
print('<HR/>', '<strong>%s</strong>' % suc, info, file=f)
print('<BR/>', '<a href="%s">back</a>' % self.headers['referer'], file=f)
print('<HR/>', '<small>Powerd By: <a href="https://github.com/hyz">woody</a>.</small>', file=f)
print('</BODY>', '</HTML>', file=f)
content = f.getvalue().encode('UTF-8')
self.send_response(200)
self.send_header("Content-type", "text/html; charset=UTF-8")
self.send_header("Content-Length", str(len(content)))
self.end_headers()
self.wfile.write(content) # self.copyfile(f, self.wfile)
def _post_file(self):
filename = None
for part in multipart(self.rfile, self.headers['content-type']):
part.filename = os.path.normpath(part.filename).strip('./\\')
if part.filename:
filename = part.filename
fullp = self.real_path(filename)
try:
with open(fullp, 'wb') as out:
for b in part.body:
out.write(b)
self.simplyfied_table(fullp)
return (True, 'Upload success: %s' % filename)
except IOError:
return (False, "Upload fail: file=%s" % fullp)
return (False, 'Upload fail: %s' % filename)
def simplyfied_table(self, fullp):
def _readlink(lnk):
try:
return os.readlink(lnk)
except:
return None
def _unlink(pa):
try:
os.unlink(pa)
except:
pass
tmpfn = '/tmp/taobao-helper.xls.txt'
with open(tmpfn, 'w') as out:
if 0 == subprocess.call([_xlsprint, fullp], stdout=out):
lnk_xls = '.xls'
prev_xls = _readlink(lnk_xls)
if prev_xls:
if prev_xls != fullp:
_unlink(prev_xls)
_unlink(lnk_xls)
os.symlink(fullp, lnk_xls)
else:
os.symlink(fullp, lnk_xls)
_unlink(_xls_txt)
os.rename(tmpfn, _xls_txt)
def _get_headers(self, path):
if os.path.isdir(path):
#if not self.path.endswith('/'):
# # redirect browser - doing basically what apache does
# self.send_response(301)
# self.send_header("Location", self.path + "/")
# self.end_headers()
# return Null
for index in "index.html", "index.htm":
index = os.path.join(path, index)
if os.path.exists(index):
path = index
break
else:
return self._do_list_directory(path)
ctype = self.guess_type(path)
try:
f = open(path, 'rb')
fs = os.fstat(f.fileno())
self.send_response(200)
self.send_header("Content-type", ctype)
self.send_header("Content-Length", str(fs.st_size))
self.send_header("Last-Modified", self.date_time_string(fs.st_mtime))
self.end_headers()
shutil.copyfileobj(f, self.wfile)
except IOError:
self.send_error(404, "File not found")
return Null
def _do_list_directory(self, path):
try:
for _, dirs, files in os.walk(path): break
list = [ x for x in dirs + files if not x.startswith('.') ]
# list = [ x for x in os.listdir(path) if not x.startswith('.') ]
except os.error:
self.send_error(404, "No permission to list directory")
return Null
list.sort(key=lambda a: a.lower())
displaypath = cgi.escape(urllib.parse.unquote(self.path))
with io.StringIO() as f:
print('<!DOCTYPE html PUBLIC "-//W3C//DTD HTML 3.2 Final//EN">', file=f)
print('<HTML>', '<title>Directory listing for %s</title>' % displaypath, '<BODY>', file=f)
print('<h2>Directory listing for %s</h2>' % displaypath, '<hr/>', file=f)
print('<FORM ENCTYPE="multipart/form-data" method="POST">', file=f)
print('<INPUT name="file" type="file"/>', file=f)
print('<INPUT type="submit" value="upload"/>', file=f)
print('</FORM>', '<hr><ul>', file=f)
for name in list:
fullname = os.path.join(path, name)
displayname = linkname = name
# Append / for directories or @ for symbolic links
if os.path.isdir(fullname):
displayname = name + "/"
linkname = name + "/"
if os.path.islink(fullname):
displayname = name + "@"
# Note: a link to a directory displays with @ and links with /
print('<li><a href="%s">%s</a>' % (urllib.parse.quote(linkname), cgi.escape(displayname)), file=f)
print('</ul><hr>', file=f)
print('<FORM method="GET" action="/?">', file=f)
print('<INPUT type="text" name="k"/>', file=f)
print('<INPUT type="submit" value="search"/>', file=f)
print('</FORM>', file=f)
print('</BODY>', '</HTML>', file=f)
content = f.getvalue().encode('UTF-8')
#length = f.tell() f.seek(0)
#f.close()
self.send_response(200)
self.send_header("Content-type", "text/html; charset=UTF-8")
self.send_header("Content-Length", str(len(content)))
self.end_headers()
self.wfile.write(content) # self.copyfile(f, self.wfile)
def real_path(self, tail=None):  # translated_path(self):
    """Map the request URL (``self.path``) to a relative filesystem path.

    Side effect: when the URL carries a query string it is parsed into the
    ``self.querys`` dict.  Optional ``tail`` is joined onto the result.
    Returns ``'.'`` for the site root.
    """
    #def make_querys(querys):
    #    m = {}
    #    for p in querys.split('&'):
    #        k,_,v = p.partition('=')
    #        k = urllib.parse.unquote(k).strip()
    #        v = urllib.parse.unquote(v).strip()
    #        if k and v:
    #            m[k] = v
    #    return m
    # Split off the query string ('?...') if present.
    path, _, qsl = self.path.partition('?')
    if qsl:
        self.querys = dict( urllib.parse.parse_qsl(qsl) ) # self.querys = make_querys(self.querys)
    # NOTE(review): identity check — CPython's str.partition returns the
    # original object when the separator is absent, so this effectively
    # means "no '?' was found"; confirm before porting to another runtime.
    if path is self.path:
        # No query string: there may still be a '#fragment' to drop.
        path,_,_ = path.partition('#')
    # Decode %-escapes, normalise, and strip leading './', '/' and '\'
    # so the result is always relative to the server's working directory.
    path = os.path.normpath(urllib.parse.unquote(path)).strip('./\\') # posixpath
    if tail:
        path = os.path.join(path, tail)
    return path or '.'
    #return os.path.join(os.getcwd(), path.strip('/'))
    #words = path.split('/')
    #words = [_f for _f in words if _f]
    #path = os.getcwd()
    #for word in words:
    #    drive, word = os.path.splitdrive(word)
    #    head, word = os.path.split(word)
    #    if word in (os.curdir, os.pardir):
    #        continue
    #    path = os.path.join(path, word)
    #return path
def copyfile(self, source, outputfile):
    """Copy the file-like ``source`` into ``outputfile`` in buffered chunks."""
    shutil.copyfileobj(source, outputfile)
def guess_type(self, path):
    """Return the MIME type for ``path`` based on its extension.

    The extension is looked up verbatim first, then lower-cased; when
    neither matches, the ``''`` entry (the octet-stream default) is used.
    """
    _, ext = os.path.splitext(path)  # posixpath
    for candidate in (ext, ext.lower()):
        if candidate in self.extensions_map:
            return self.extensions_map[candidate]
    return self.extensions_map['']
# Class-level MIME table: the system mime.types plus a few plain-text
# overrides; the '' key is the fallback for unknown extensions.
if not mimetypes.inited:
    mimetypes.init() # try to read system mime.types
extensions_map = mimetypes.types_map.copy()
extensions_map.update({
    '': 'application/octet-stream', # Default
    '.py': 'text/plain',
    '.c': 'text/plain',
    '.h': 'text/plain',
})
def test(HandlerClass = SimpleHTTPRequestHandler, ServerClass = http.server.HTTPServer):
    """Run the stdlib test harness with this handler (blocks, serving)."""
    http.server.test(HandlerClass, ServerClass)
def ch_cwd():
    """Create ``~/www`` if needed and make it the current working directory.

    Falls back to ``os.path.expanduser('~')`` when $HOME is unset, so the
    join below cannot fail with a ``None`` component.
    """
    home = os.getenv('HOME') or os.path.expanduser('~')
    wd = os.path.join(home, 'www')
    # exist_ok makes a separate isdir() pre-check redundant (and race-free).
    os.makedirs(wd, exist_ok=True)
    os.chdir(wd)
def main():
    """Serve the ~/www directory on port 80, dropping root privileges."""
    ch_cwd()
    print('cwd', os.getcwd())
    # Bind the privileged port first, then drop to uid/gid 1000
    # (hard-coded; presumably the unprivileged desktop user — confirm).
    httpd = http.server.HTTPServer(('', 80), SimpleHTTPRequestHandler)
    if os.getuid() == 0:
        os.setegid(1000)
        os.seteuid(1000)
    httpd.serve_forever()
if __name__ == '__main__':
    # Entry point: serve forever until interrupted.
    # work_dir = ('.',sys.argv[1])[sys.argc>1 and os.path.isdir(sys.argv[1])]
    main()
|
987,982 | 1d7eed1a7493155ce39afaff7d141c89a3199fd1 | import os
import cv2
import math
import logging
import datetime
import pandas as pd
from PIL import Image
import LPIPS as models
import matlab.engine
import torch
import argparse
from tqdm import tqdm
from logging import handlers
import numpy as np
import yaml
class Logger(object):
    """Wire a named logger to stderr and to a time-rotating log file.

    ``level`` is one of the keys of ``level_relations``; unknown names now
    fall back to INFO instead of crashing ``setLevel`` with ``None``.
    """

    # Friendly level names -> stdlib logging levels.
    level_relations = {
        'debug': logging.DEBUG,
        'info': logging.INFO,
        'warning': logging.WARNING,
        'error': logging.ERROR,
        'crit': logging.CRITICAL
    }

    def __init__(self, filename, level='info', when='D', backCount=3,
                 fmt='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s'):
        self.logger = logging.getLogger(filename)
        format_str = logging.Formatter(fmt)
        self.logger.setLevel(self.level_relations.get(level, logging.INFO))
        # logging.getLogger() returns a cached instance per name, so guard
        # against stacking duplicate handlers (and duplicated log lines)
        # when the same filename is wrapped more than once.
        if not self.logger.handlers:
            sh = logging.StreamHandler()
            sh.setFormatter(format_str)
            th = handlers.TimedRotatingFileHandler(filename=filename, when=when, backupCount=backCount, encoding='utf-8')
            th.setFormatter(format_str)
            self.logger.addHandler(sh)
            self.logger.addHandler(th)
def CalMATLAB(SRFolder, GTFolder):
    """Run the MATLAB metric suite on an SR/GT folder pair.

    Starts a fresh MATLAB engine per call; the MetricEvaluation/ toolbox
    must live under the current working directory.  Returns the scores as
    a squeezed numpy array (consumed by the caller in the order
    [PI, Ma, NIQE, MSE, RMSE, PSNR, SSIM] — confirm against
    evaluate_results.m).
    """
    eng = matlab.engine.start_matlab()
    eng.addpath(eng.genpath(eng.fullfile(os.getcwd(), 'MetricEvaluation')))
    res = eng.evaluate_results(SRFolder, GTFolder)
    res = np.array(res)
    res = res.squeeze()
    return res
def CalLPIPS(SRFolder, GTFolder):
    """Mean LPIPS distance (net-lin/alex, CPU) over same-named image pairs.

    Assumes every file in SRFolder has a same-named counterpart in
    GTFolder (the caller asserts this).
    """
    nameList = os.listdir(SRFolder)
    res = []
    model = models.PerceptualLoss(model='net-lin', net='alex', use_gpu=False)
    for i in nameList:
        imageA = os.path.join(SRFolder, i)
        imageB = os.path.join(GTFolder, i)
        imageA = np.array(Image.open(imageA))
        imageB = np.array(Image.open(imageB))
        # x/127.5 - 1 maps uint8 [0,255] to [-1,1]; the newaxis+transpose
        # turns HWC into the 1xCxHxW layout the model expects.
        imageA = torch.Tensor((imageA / 127.5 - 1)[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
        imageB = torch.Tensor((imageB / 127.5 - 1)[:, :, :, np.newaxis].transpose((3, 2, 0, 1)))
        dist = model.forward(imageA, imageB).detach().squeeze().numpy()
        res.append(dist)
    res = np.array(res)
    res = res.squeeze()
    return np.mean(res)
# Driver: read a YAML config listing SR/GT folder pairs, compute the
# MATLAB metrics plus LPIPS per dataset, and write CSV/XLSX summaries.
parser = argparse.ArgumentParser(description="Evaluate SR results")
parser.add_argument('YAML', type=str, help='configuration file')
args = parser.parse_args()
conf = dict()
with open(args.YAML, 'r', encoding='UTF-8') as f:
    # safe_load: the config is plain data, and yaml.load without an
    # explicit Loader is deprecated and can instantiate arbitrary tags.
    conf = yaml.safe_load(f)
Datasets = conf['Pairs']['Dataset']
SRFolder = conf['Pairs']['SRFolder']
GTFolder = conf['Pairs']['GTFolder']
Metric = ['Ma', 'NIQE', 'PI', 'PSNR', 'SSIM', 'MSE', 'RMSE', 'LPIPS']
Name = conf['Name']
Echo = conf['Echo']
# Output directory is stamped so repeated runs never collide.
output = Name + datetime.datetime.now().strftime('-%Y%m%d%H%M%S')
if not os.path.isdir('../evaluate'):
    os.mkdir('../evaluate')
os.mkdir(os.path.join('../evaluate', output))
log = Logger(os.path.join('../evaluate', output + '.log'), level='info')
log.logger.info('Init...')
# (fixed mislabel: this line previously said 'SRFolder' while logging Datasets)
log.logger.info('Datasets - ' + str(Datasets))
log.logger.info('GTFolder - ' + str(GTFolder))
log.logger.info('SRFolder - ' + str(SRFolder))
log.logger.info('Metric - ' + str(Metric))
log.logger.info('Name - ' + Name)
log.logger.info('Echo - ' + str(Echo))
res = pd.DataFrame(columns=('PI', 'Ma', 'NIQE', 'MSE', 'RMSE', 'PSNR', 'SSIM', 'LPIPS'))
for i, j, k in zip(Datasets, SRFolder, GTFolder):
    log.logger.info('Calculating ' + i + '...')
    assert set(os.listdir(j)) == set(os.listdir(k)), 'SR pictures and GT pictures are not matched.'
    MATLAB = CalMATLAB(j, k)
    LPIPS = CalLPIPS(j, k)
    resDict = dict()
    resDict['PI'] = [MATLAB[0]]
    resDict['Ma'] = [MATLAB[1]]
    resDict['NIQE'] = [MATLAB[2]]
    resDict['MSE'] = [MATLAB[3]]
    resDict['RMSE'] = [MATLAB[4]]
    resDict['PSNR'] = [MATLAB[5]]
    resDict['SSIM'] = [MATLAB[6]]
    resDict['LPIPS'] = [LPIPS]
    resDataFrame = pd.DataFrame(resDict)
    resDataFrame.index = [i]
    # DataFrame.append was deprecated and removed in pandas 2.0; concat
    # keeps the same one-row-per-dataset accumulation behaviour.
    res = pd.concat([res, resDataFrame])
    if Echo:
        log.logger.info('[' + i + '] PI - ' + str(MATLAB[0]))
        log.logger.info('[' + i + '] Ma - ' + str(MATLAB[1]))
        log.logger.info('[' + i + '] NIQE - ' + str(MATLAB[2]))
        log.logger.info('[' + i + '] MSE - ' + str(MATLAB[3]))
        log.logger.info('[' + i + '] RMSE - ' + str(MATLAB[4]))
        log.logger.info('[' + i + '] PSNR - ' + str(MATLAB[5]))
        log.logger.info('[' + i + '] SSIM - ' + str(MATLAB[6]))
        log.logger.info('[' + i + '] LPIPS - ' + str(LPIPS))
res.to_csv(os.path.join('../evaluate', output, Name + '.csv'), header=True, index=True)
res.to_excel(os.path.join('../evaluate', output, Name + '.xlsx'), header=True, index=True)
log.logger.info('Done.')
|
987,983 | f2d7df228cca9f4a1b479ff38447cd2b57d51425 | # -*- coding: utf-8 -*-
# @Time : 2018/2/22 9:58
# @Author : Yeh
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
import sys
class Solution:
    """LeetCode 124: maximum path sum in a binary tree.

    A path is any node-to-node chain; the running best is tracked in
    ``self.maxValue`` while ``pathDown`` returns the best downward-only
    branch sum usable by a parent.
    """
    # Class-level placeholder; reset per call in maxPathSum so instances
    # can be reused and arbitrarily negative node values are handled
    # (the old -100000 floor broke for values below it).
    maxValue = float('-inf')

    def maxPathSum(self, root):
        """
        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        self.maxValue = float('-inf')  # reset state for this traversal
        self.pathDown(root)
        return self.maxValue

    def pathDown(self, root):
        """Return the best sum of a path starting at ``root`` and going
        strictly downward (branches contributing negatively are dropped),
        updating ``self.maxValue`` with the best through-path seen."""
        if root is None:
            return 0
        left = max(0, self.pathDown(root.left))
        right = max(0, self.pathDown(root.right))
        # Best path that bends through this node uses both children.
        self.maxValue = max(self.maxValue, left + right + root.val)
        # A parent can only extend one side.
        return max(left, right) + root.val
# Smoke test: a single-node tree whose only path is the node itself (-3).
from BuildTree import binaryTree
arr =[-3]
tree =binaryTree()
root =tree.build(arr)
demo = Solution()
print(demo.maxPathSum(root))
987,984 | 801ab63283f2c5ef1f14b15e714374310b722cee | # Generated by Django 3.0.7 on 2020-11-10 16:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Add Activities.user FK (default user id 1, ON DELETE SET DEFAULT)."""

    dependencies = [
        ('test2', '0005_remove_activities_email'),
    ]
    operations = [
        migrations.AddField(
            model_name='activities',
            name='user',
            field=models.ForeignKey(default=1, on_delete=django.db.models.deletion.SET_DEFAULT, to='test2.Register_User'),
        ),
    ]
987,985 | f436b689f7512711ac1303b077ddaf55441e6700 | import requests
from bs4 import BeautifulSoup
import re
# Scrape the Wikipedia "list of Japanese tourist sites" page and emit one
# SQL INSERT statement seeding the tag table with the site names.
response = requests.get('https://ja.wikipedia.org/wiki/%E6%97%A5%E6%9C%AC%E3%81%AE%E8%A6%B3%E5%85%89%E5%9C%B0%E4%B8%80%E8%A6%A7')
soup = BeautifulSoup(response.text, 'html.parser')
data = soup.find_all('a', href=re.compile('/wiki/.*'))
data_arr = [i.get_text() for i in data]
result = []
for i in data_arr[6:]:  # skip the page-chrome links before the list body
    if i == "日本の観光地":
        break
    if i == '':
        continue
    result.append(i)
# set() drops duplicates; sort afterwards so the generated SQL is stable
# across runs (set iteration order is arbitrary).
unique_result = sorted(set(result))
insert_str = "INSERT INTO tag(name) VALUES "
# Quote each name as a SQL string literal (the original emitted bare,
# unquoted values — invalid SQL) and escape embedded single quotes.
values = ["('" + name.replace("'", "''") + "')" for name in unique_result]
insert_str += ",".join(values) + ";"
print(insert_str)
987,986 | f35cb7107d30b6163bc3e14278d6cfb3d502da34 |
import re
# constants
# "@username" reply mentions.
re_reply = re.compile(
    r'@(\w+)'
)
# Bare URLs (http/https/ftp/gopher/file) not preceded by a double quote
# or '(' — i.e. not already inside an attribute or markup.
re_url = re.compile(
    r'(?<!"|\()((https?|ftp|gopher|file)://(\w|\.|/|\(|\)|\?|=|%|&|:|#|_|-|~|\+)+)'
)
# An <a ...> element whose link text is not itself a URL; captures the
# opening tag and the rest of the element separately.
re_anchor = re.compile(
    r'(<\s*a[^<>]*)(>(?!(https?|ftp|gopher|file)://)(.(?!<\s*/\s*a\s*>))*.<\s*/\s*a\s*>)'
)
# <link>/<a> tags carrying both rel=... and href=... in either order;
# named groups rela/urla and urlb/relb hold the two orderings.
re_trackback = re.compile(
    r'(<\s*(link|a)[^<>]*)(((rel\s*=\s*[\'"](?P<rela>[^\'"]*)[\'"])([^<>]*)(href\s*=\s*[\'"](?P<urla>[^\'"]*)[\'"]))|((href\s*=\s*[\'"](?P<urlb>[^\'"]*)[\'"])([^<>]*)(rel\s*=\s*[\'"](?P<relb>[^\'"]*)[\'"])))'
)
# Any HTML tag (for stripping markup).
re_html = re.compile(
    r'<[^<]*?/?>'
)
|
987,987 | 0f380450ebd44f4acc35abdbe7aa4114bc9343af | from hello import greeting
# Exercise the imported helper with a sample message.
greeting("do something else")
|
987,988 | acd9a7fbef77c153777fae9b5a7aacd2da58b686 | # Generated by Django 2.2 on 2020-09-22 12:50
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial polls schema: Poll, Question, PassedPoll, AnswerChoice, Answer."""

    initial = True

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        migrations.CreateModel(
            name='Poll',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100, verbose_name='название')),
                ('start_date', models.DateField(verbose_name='дата начала')),
                ('finish_date', models.DateField(verbose_name='дата окончания')),
                ('is_published', models.BooleanField(default=False)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
            options={
                'ordering': ['is_published', '-created_at'],
            },
        ),
        migrations.CreateModel(
            name='Question',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('question_text', models.CharField(max_length=200, verbose_name='название')),
                ('question_type', models.CharField(choices=[('T', 'Ответ в свободной форме'), ('1', 'Выбор одного варианта'), ('M', 'Выбор нескольких вариантов')], max_length=1, verbose_name='тип')),
                ('position', models.PositiveSmallIntegerField(default=0, verbose_name='прядок следования')),
                ('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='questions', to='polls.Poll')),
            ],
            options={
                'ordering': ['-position'],
            },
        ),
        migrations.CreateModel(
            name='PassedPoll',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('auid', models.PositiveSmallIntegerField(null=True)),
                ('passed_at', models.DateTimeField(auto_now_add=True)),
                ('poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='passed_polls', to='polls.Poll')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='passed_polls', to=settings.AUTH_USER_MODEL)),
            ],
        ),
        migrations.CreateModel(
            name='AnswerChoice',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('choice_text', models.CharField(max_length=100, verbose_name='название')),
                ('position', models.PositiveSmallIntegerField(default=0, verbose_name='прядок следования')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answer_choices', to='polls.Question')),
            ],
            options={
                'ordering': ['-position'],
            },
        ),
        migrations.CreateModel(
            name='Answer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('answer_text', models.CharField(max_length=100, null=True, verbose_name='текст ответа')),
                ('choice', models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='polls.AnswerChoice')),
                ('passed_poll', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='answers', to='polls.PassedPoll')),
                ('question', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='polls.Question')),
            ],
        ),
    ]
987,989 | e4005b1fb09ee2a5d58da2bab8a09a94cad080b5 | """
Benchmark evaluating evy's performance at speaking to itself over a localhost socket.
Profiling and graphs
====================
You can profile this program and obtain a call graph with `gprof2dot` and `graphviz`:
```
python -m cProfile -o output.pstats path/to/this/script arg1 arg2
gprof2dot.py -f pstats output.pstats | dot -Tpng -o output.png
```
It generates a graph where a node represents a function and has the following layout:
```
+------------------------------+
| function name |
| total time % ( self time % ) |
| total calls |
+------------------------------+
```
where:
* total time % is the percentage of the running time spent in this function and all its children;
* self time % is the percentage of the running time spent in this function alone;
* total calls is the total number of times this function was called (including recursive calls).
An edge represents the calls between two functions and has the following layout:
```
total time %
calls
parent --------------------> children
```
where:
* total time % is the percentage of the running time transferred from the children to this parent (if available);
* calls is the number of calls the parent function called the children.
"""
import time
import benchmarks
import socket as socket_orig
# Benchmark defaults; all are overridable from the command line below.
BYTES = 1000        # total payload per connection
SIZE = 1            # maximum chunk size per send/recv
CONCURRENCY = 50    # simultaneous reader/writer pairs
TRIES = 5           # benchmark repetitions
def reader (sock):
    """Consume BYTES bytes from ``sock`` in chunks of at most SIZE."""
    expect = BYTES
    while expect > 0:
        d = sock.recv(min(expect, SIZE))
        if not d:
            # Peer closed early: recv() returns '' forever from here on,
            # so bail out instead of spinning in an infinite loop.
            break
        expect -= len(d)
def writer (addr, socket_impl):
    """Connect to ``addr`` with ``socket_impl`` and push BYTES bytes."""
    sock = socket_impl(socket_orig.AF_INET, socket_orig.SOCK_STREAM)
    sock.connect(addr)
    sent = 0
    while sent < BYTES:
        # Python 2 integer division: SIZE / 2 repetitions of 'xy' give a
        # chunk of roughly SIZE bytes (at least 2).
        d = 'xy' * (max(min(SIZE / 2, BYTES - sent), 1))
        sock.sendall(d)
        sent += len(d)
####################################################################################################
def launch_green_threads ():
    """Benchmark leg: CONCURRENCY reader/writer pairs on evy green threads."""
    from evy.patched import socket
    import evy

    def green_accepter (server_sock, pool):
        # Accept one connection per writer and hand it to a green reader.
        for i in xrange(CONCURRENCY):
            sock, addr = server_sock.accept()
            pool.spawn_n(reader, sock)

    # Two greenlets per pair (reader + writer) plus the accepter itself.
    pool = evy.GreenPool(CONCURRENCY * 2 + 1)
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 0))  # port 0: let the OS pick a free port
    server_sock.listen(50)
    addr = ('localhost', server_sock.getsockname()[1])
    pool.spawn_n(green_accepter, server_sock, pool)
    for i in xrange(CONCURRENCY):
        pool.spawn_n(writer, addr, socket.socket)
    pool.waitall()
def launch_heavy_threads ():
    """Benchmark leg: the same workload on OS threads for comparison."""
    import threading
    import socket

    def heavy_accepter (server_sock, pool):
        # Accept one connection per writer; each reader runs on its own
        # OS thread, collected into ``pool`` so the caller can join them.
        import threading
        for i in xrange(CONCURRENCY):
            sock, addr = server_sock.accept()
            t = threading.Thread(None, reader, "reader thread", (sock,))
            t.start()
            pool.append(t)

    threads = []
    server_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_sock.bind(('localhost', 0))  # port 0: let the OS pick a free port
    server_sock.listen(50)
    addr = ('localhost', server_sock.getsockname()[1])
    accepter_thread = threading.Thread(None, heavy_accepter, "accepter thread",
                                       (server_sock, threads))
    accepter_thread.start()
    threads.append(accepter_thread)
    for i in xrange(CONCURRENCY):
        client_thread = threading.Thread(None, writer, "writer thread", (addr, socket.socket))
        client_thread.start()
        threads.append(client_thread)
    # Wait for the accepter plus every reader and writer to finish.
    for t in threads:
        t.join()
if __name__ == "__main__":
    import optparse

    # Command-line overrides for the module-level benchmark knobs.
    parser = optparse.OptionParser()
    parser.add_option('--compare-threading', action = 'store_true', dest = 'threading',
                      default = False)
    parser.add_option('-b', '--bytes', type = 'int', dest = 'bytes',
                      default = BYTES)
    parser.add_option('-s', '--size', type = 'int', dest = 'size',
                      default = SIZE)
    parser.add_option('-c', '--concurrency', type = 'int', dest = 'concurrency',
                      default = CONCURRENCY)
    parser.add_option('-t', '--tries', type = 'int', dest = 'tries',
                      default = TRIES)
    opts, args = parser.parse_args()

    # Rebind the globals that reader/writer consult before the runs start.
    BYTES = opts.bytes
    SIZE = opts.size
    CONCURRENCY = opts.concurrency
    funcs = [launch_green_threads]
    if opts.threading:
        funcs.append(launch_heavy_threads)
    print
    print "measuring results for %d iterations..." % opts.tries
    print
    results = benchmarks.measure_best(opts.tries, 3, lambda: None, lambda: None, *funcs)
    print "green:", results[launch_green_threads]
    if opts.threading:
        print "threads:", results[launch_heavy_threads]
        print "%", ((results[launch_green_threads] - results[launch_heavy_threads]) /
                    (results[launch_heavy_threads] * 100))
987,990 | 5ddb0695019cef62d8f2b01a87bafaa702dcd970 | import json
import requests
import os
from pathlib import Path
def get_rdap_info(ip_address, force_update_cache=False):
    """Fetch RDAP data for ``ip_address`` (primary source: rdap.arin.net).

    Serves from the on-disk JSON cache unless ``force_update_cache`` is
    set.  Status codes 429-499 from ARIN trigger a fallback lookup via
    the rdap.net aggregator.  Successful responses are written back to
    the cache.  Returns the parsed dict, or None when every source fails.
    """
    print('Retrieving RDAP from', ip_address)
    rdap_info = None
    if not force_update_cache:
        rdap_info = get_rdap_info_from_cache(ip_address)
    if not rdap_info:
        api_url = f'https://rdap.arin.net/registry/ip/{ip_address}'
        headers = {
            'accept': 'application/json',
            'content-type': 'application/json'
        }
        response = requests.get(api_url, headers=headers)
        if response.status_code >= 429 and response.status_code < 500:
            # Rate-limited or refused by ARIN: retry through the rdap.net
            # redirector and cache whatever it returns.
            alt_api_url = f'https://www.rdap.net/ip/{ip_address}'
            alt_response = requests.get(alt_api_url, headers=headers)
            rdap_info = json.loads(alt_response.content.decode('utf-8'))
            store_rdap_info_in_cache(ip_address, rdap_info)
            return rdap_info
        elif response.status_code == 200:
            rdap_info = json.loads(response.content.decode('utf-8'))
            store_rdap_info_in_cache(ip_address, rdap_info)
            return rdap_info
        else:
            print('Something went wrong obtaing RDAP from', ip_address)
    return rdap_info
def store_rdap_info_in_cache(ip_address: str, rdap_info: dict):
    """Persist ``rdap_info`` as pretty-printed JSON in the RDAP cache."""
    file_name = rdap_info_cache_file_name(ip_address)
    # Create the cache directory on first use; the original open() raised
    # FileNotFoundError when cache/rdap/ did not exist yet.
    os.makedirs(os.path.dirname(file_name), exist_ok=True)
    with open(file_name, 'w') as json_file:
        json.dump(rdap_info, json_file, indent=4)
def get_rdap_info_from_cache(ip_address: str) -> dict:
    """Return the cached RDAP dict for ``ip_address``, or None if absent."""
    cache_path = rdap_info_cache_file_name(ip_address)
    if not os.path.isfile(cache_path):
        return None
    with open(cache_path, 'r') as cached:
        return json.load(cached)
def rdap_info_cache_directory() -> str:
    """Absolute path of the cache/rdap directory next to this module."""
    module_dir = Path(__file__).resolve().parent
    return os.path.join(str(module_dir), 'cache', 'rdap')
def rdap_info_cache_file_name(ip_address: str) -> str:
    """Path of the cache file holding RDAP data for ``ip_address``."""
    return os.path.join(rdap_info_cache_directory(), ip_address + '.json')
|
987,991 | 30205067d345bb02f2b8fd202c3e09a7e84e6cdc | # '''functions'''
# #Question 1: Take two number print and return sum
# #Question 2: Extend Question 1 by passing an arbitary amount of ints.
# def sum_numbers(a,b,*numbers):
# sum = a + b
# for number in numbers:
# sum += number
# return sum
# sum = sum_numbers(1,1,1,1)
# print(sum)
# #Question 3 Pass an arbitary amount of named arguments and create a dictionary.
# def create_dictionary(**kwargs):
# dictionary = {}
# for key, value in kwargs.items():
# dictionary[key] = value
# return dictionary
# dictionary = create_dictionary(a=1, b=2, c=3, d=4)
# print(dictionary) |
987,992 | a37278f117e17c92b6a675c8ac22faf90edebc05 |
def test_list_customers(app):
    """GET /customers honours skip/limit paging and returns summary rows."""
    response = app.get('/customers', params={"skip": 0, "limit": 10})
    assert response.status_code == 200
    customers = response.json()
    assert len(customers) == 10
    assert customers[0] == {
        "id": 1,
        "first_name": "MARY",
        "last_name": "SMITH",
    }
def test_get_customer(app):
    """GET /customers/{id} returns the full customer record."""
    response = app.get('/customers/1')
    assert response.status_code == 200
    customer = response.json()
    assert customer == {
        "id": 1,
        "first_name": "MARY",
        "last_name": "SMITH",
        "address": "1913 Hanoi Way",
        "city": "Sasebo",
        "country": "Japan",
        "district": "Nagasaki",
        "phone": "28303384290",
    }
def test_get_customer_rentals(app):
    """GET /customers/{id}/rentals pages the customer's rental history."""
    response = app.get('/customers/1/rentals', params={"skip": 0, "limit": 10})
    assert response.status_code == 200
    rentals = response.json()
    assert len(rentals) == 10
    assert rentals[0] == {
        "film_id": 611,
        "rental_date": "2005-06-15T00:54:12",
        "days_rented": 8,
        "cost": 5.99,
    }
def test_list_available_films(app):
    """GET /available_films lists in-stock films with paging."""
    response = app.get('/available_films', params={"skip": 0, "limit": 10})
    assert response.status_code == 200
    available_films = response.json()
    assert len(available_films) == 10
    assert available_films[0] == {
        "id": 3,
        "title": "ADAPTATION HOLES",
        "category": "Documentary",
        "description": "A Astounding Reflection of a Lumberjack And a Car who must Sink a Lumberjack in A Baloon Factory",
        "rating": "NC-17",
        "rental_duration": "7",
    }
def test_get_film_details(app):
    """GET /films/{id} returns full film details plus its actor list."""
    response = app.get('/films/1')
    assert response.status_code == 200
    film = response.json()
    # Pop the actors so the remaining dict can be compared exactly.
    actors = film.pop("actors")
    assert film == {
        "id": 1,
        "title": "ACADEMY DINOSAUR",
        "category": "Documentary",
        "description": "A Epic Drama of a Feminist And a Mad Scientist who must Battle a Teacher in The Canadian Rockies",
        "rating": "PG",
        "rental_duration": "6",
        "length": "86",
        "replacement_cost": 20.99,
        "special_features": [
            "Deleted Scenes",
            "Behind the Scenes",
        ],
    }
    assert len(actors) == 10
    assert actors[0] == {
        "first_name": "PENELOPE",
        "last_name": "GUINESS",
        "actor_id": 1,
    }
def test_get_film_renters(app):
    """GET /films/{id}/renters pages the customers who rented the film."""
    response = app.get('/films/1/renters', params={"skip": 0, "limit": 10})
    assert response.status_code == 200
    renters = response.json()
    assert len(renters) == 10
    assert renters[0] == {
        "id": 8,
        "first_name": "SUSAN",
        "last_name": "WILSON",
    }
987,993 | d0949798e40b1fee964a3bb87bfbb34728a8e262 | # standard library
import sys
import argparse
# third-party
pass
# local
import rsync_system_backup
# Default to showing usage when invoked with no arguments.
if not len(sys.argv) > 1:
    print("WARNING: you didn't specify any arguments, therefore appending --help..")
    sys.argv.append("--help")
# Import the CLI entry point late so the argv tweak above is in effect.
from rsync_system_backup.cli import *
main()
987,994 | 95aab1716b7b227092ef2cbab42af21242865c3b | import os
# Flask/SQLAlchemy configuration for the patch server.
# NOTE(review): SECRET_KEY is regenerated on every process start, which
# invalidates existing sessions after a restart — confirm this is intended.
SECRET_KEY = os.urandom(32)
APP_DIR = os.path.dirname(os.path.realpath(__file__))
DEBUG = True
SQL_LOGGING = False
# Database lives in DATABASE_DIR when set, otherwise next to the app.
DATABASE_PATH = os.path.join(
    os.environ.get('DATABASE_DIR', APP_DIR), 'patch_server.db')
if os.name == 'nt':
    # Windows: three-slash sqlite URI and doubled backslashes.
    SQLALCHEMY_DATABASE_URI = r'sqlite:///{}' .format(DATABASE_PATH)
    APP_DIR = APP_DIR.replace("\\", "\\\\")
    SQLALCHEMY_DATABASE_URI = SQLALCHEMY_DATABASE_URI.replace("\\", "\\\\")
else:
    SQLALCHEMY_DATABASE_URI = r'sqlite:////{}' .format(DATABASE_PATH)
SQLALCHEMY_TRACK_MODIFICATIONS = False
print("here is : ", APP_DIR, DATABASE_PATH, SQLALCHEMY_DATABASE_URI)
# Marker file whose presence requests an API-token reset (presumably
# consumed by the app at startup — confirm against the app code).
RESET_API_TOKEN = os.path.exists(os.path.join(APP_DIR, 'reset_api_token'))
987,995 | 3147e9c63958abb21cc698b285959de3d0216610 | import csv
import sys
# Minimal CSV-backed login check: prompt for credentials and look them up
# in users.csv (DictReader columns: username, password).
# NOTE: passwords are stored and compared in plain text — acceptable for
# an exercise, never for production.
username = input('Enter username: ').strip()
password = input('Enter password: ').strip()
user_present = False
fail_msg = 'User Not Found'
if len(username) == 0 or len(password) == 0:
    print('Enter valid credentials!')
    sys.exit()
try:
    with open('users.csv', mode='r') as file:
        contents = csv.DictReader(file)
        for user in contents:
            if user['username'] == username:
                if user['password'] == password:
                    user_present = True
                    break
                else:
                    fail_msg = 'Wrong Password'
    if user_present:
        print('Login Successful')
    else:
        print(fail_msg)
except FileNotFoundError:
    # Only a missing users.csv means "no records"; the original bare
    # except: also swallowed genuine bugs (e.g. KeyError on bad headers).
    print('No Record Found!')
987,996 | 6f5abd237c95b6590f222c0e5c2dbaf1c7243e99 | #No method is needed to iterate over a dictionary:
d = {'A': 'Apple', 'B': 'Ball', 'C': 'Cat'}
for Key in d:
print(Key)
#But it's possible to use the method iterkeys():
for key in d.iterkeys():
print(key)
#The method itervalues() is a convenient way for iterating directly over the values:
for val in d.itervalues():
print(val)
#The above loop is of course equivalent to the following one:
for key in d:
print(d[key])
|
987,997 | 503450aaa6cf25bc62f0603a4226955a14577716 | total=0
# Python 2 script: read ten numbers, report total, average and maximum.
# NOTE(review): relies on Python 2 input() evaluating the typed text to a
# number; under Python 3 this would operate on strings instead.
for number in [1,2,3,4,5,6,7,8,9,10]:
    num=input("Enter #" + str(number) +": ")
    total = float(total) + num
    if number == 1:
        high = num          # seed the running maximum with the first entry
    if num > high:
        high = num
average = total / 10.0
print
print " Total =", int(total)
print "Average =", average
print "Largest =", high
987,998 | 7df400f36e824c87427ad2fd60e5542132565fc4 | import numpy as np
import astropy
from astropy.io import fits
import matplotlib
import matplotlib.pyplot as plt
#from astropy.nddata import NDData
from astropy.nddata import CCDData
import ccdproc
import astropy.units as u
from astropy.modeling import models
from ccdproc import Combiner
import os
import mycode
import m2fs_process as m2fs
from astropy.nddata import StdDevUncertainty
from copy import deepcopy
matplotlib.use('TkAgg')

# --- run configuration --------------------------------------------------
directory='/nfs/nas-0-9/mgwalker.proj/m2fs/'
m2fsrun='jan20'
datadir=m2fs.get_datadir(m2fsrun)

# Columns parsed from <run>_science_raw (one row per science field);
# rows whose first token is 'none' are placeholders and skipped.
utdate=[]
file1=[]
file2=[]
flatfile=[]
tharfile=[]
field_name=[]
scifile=[]
fibermap_utdate=[]
fibermap_file=[]
with open(directory+m2fsrun+'_science_raw') as f:
    data=f.readlines()[0:]
for line in data:
    p=line.split()
    if p[0]!='none':
        utdate.append(str(p[0]))
        file1.append(int(p[1]))
        file2.append(int(p[2]))
        flatfile.append(p[3])
        tharfile.append(p[4])
        field_name.append(p[5])
        scifile.append(p[6])
with open(directory+m2fsrun+'_fibermap_raw') as f:
    data=f.readlines()[0:]
for line in data:
    p=line.split()
    if p[0]!='none':
        fibermap_utdate.append(str(p[0]))
        fibermap_file.append(str(p[1]))
utdate=np.array(utdate)
file1=np.array(file1)
file2=np.array(file2)
flatfile=np.array(flatfile)
tharfile=np.array(tharfile)
field_name=np.array(field_name)
scifile=np.array(scifile)
fibermap_utdate=np.array(fibermap_utdate)
fibermap_file=np.array(fibermap_file)

# Frame-number fields are '-'-separated lists; split them into per-field
# lists (allfile0 collects flats + thars + science frames for each field).
flatfile0=[]
tharfile0=[]
scifile0=[]
allfile0=[]
mapfile0=[]
for i in range(0,len(tharfile)):
    flatfile0.append(flatfile[i].split('-'))
    tharfile0.append(tharfile[i].split('-'))
    scifile0.append(scifile[i].split('-'))
    allfile0.append(flatfile[i].split('-')+tharfile[i].split('-')+scifile[i].split('-'))
flatfile0=np.array(flatfile0,dtype='object')
tharfile0=np.array(tharfile0,dtype='object')
scifile0=np.array(scifile0,dtype='object')
allfile0=np.array(allfile0,dtype='object')
for i in range(0,len(fibermap_file)):
    mapfile0.append(fibermap_file[i].split('-'))
# --- science/flat/thar frames: overscan, bias, dark, gain, stitch -------
# For every frame of every field, reduce the four amplifier quadrants
# (c1..c4) independently, then stitch them into one FITS image.
for i in range(0,len(utdate)):
    for j in allfile0[i]:
        for ccd in (['b','r']):
            out=datadir+utdate[i]+'/'+ccd+str(j).zfill(4)+'_stitched.fits'
            for chip in (['c1','c2','c3','c4']):
                master_bias=astropy.nddata.CCDData.read(directory+m2fsrun+'_'+ccd+'_'+chip+'_master_bias.fits')
                # np.float was a deprecated alias of the builtin float and
                # was removed in numpy 1.24; use float directly.
                obs_readnoise=float(master_bias.header['obs_rdnoise'])
                master_dark=astropy.nddata.CCDData.read(directory+ccd+'_'+chip+'_master_dark.fits')
                filename=datadir+utdate[i]+'/'+ccd+str(j).zfill(4)+chip+'.fits'
                data=astropy.nddata.CCDData.read(filename,unit=u.adu)#header is in data.meta
                gain=float(data.header['egain'])
                print(filename,data.header['object'],data.header['binning'])
                # Overscan-correct using columns >=1024, trim to the active
                # area, then remove bias and (exposure-scaled) dark.
                oscan_subtracted=ccdproc.subtract_overscan(data,overscan=data[:,1024:],overscan_axis=1,model=models.Polynomial1D(3),add_keyword={'oscan_corr':'Done'})
                trimmed1=ccdproc.trim_image(oscan_subtracted[:,:1024],add_keyword={'trim1':'Done'})
                trimmed2=ccdproc.trim_image(trimmed1[:1028,:1024],add_keyword={'trim2':'Done'})
                debiased0=ccdproc.subtract_bias(trimmed2,master_bias)
                dedark0=ccdproc.subtract_dark(debiased0,master_dark,exposure_time='exptime',exposure_unit=u.second,scale=True,add_keyword={'dark_corr':'Done'})
                data_with_deviation=ccdproc.create_deviation(dedark0,gain=data.meta['egain']*u.electron/u.adu,readnoise=obs_readnoise*u.electron)
                gain_corrected=ccdproc.gain_correct(data_with_deviation,data_with_deviation.meta['egain']*u.electron/u.adu,add_keyword={'gain_corr':'Done'})
#                master_dark_gain_corrected=ccdproc.gain_correct(master_dark,master_dark.meta['egain']*u.electron/u.adu,add_keyword={'gain_corr':'Done'})
#                master_bias_gain_corrected=ccdproc.gain_correct(master_bias,master_bias.meta['egain']*u.electron/u.adu,add_keyword={'gain_corr':'Done'})
                gain_corrected2=deepcopy(gain_corrected)
                exptime_ratio=float(data.header['exptime'])/float(master_dark.meta['exptime'])
                for k in range(0,len(gain_corrected2.data)):
                    for q in range(0,len(gain_corrected2.data[k])):
                        gain_corrected2.uncertainty.quantity.value[k][q]=(np.max(np.array([gain_corrected2.data[k][q]+master_dark.data[k][q]*gain*exptime_ratio+2.+obs_readnoise**2+(master_dark.uncertainty.quantity.value[k][q]*gain*exptime_ratio)**2+(master_bias.uncertainty.quantity.value[k][q]*gain)**2,0.6*(obs_readnoise**2+(master_dark.uncertainty.quantity.value[k][q]*gain*exptime_ratio)**2+(master_bias.uncertainty.quantity.value[k][q]*gain)**2)])))**0.5##rescale variances using empirically-determined fudges that hold when readnoise ~ 2.5 electrons (via S. Koposov, private comm. May 2020)
#                        poop1=(np.max(np.array([gain_corrected2.data[k][q]+master_dark.data[k][q]*gain*exptime_ratio+2.+obs_readnoise**2+(master_dark.uncertainty.quantity.value[k][q]*gain*exptime_ratio)**2+(master_bias.uncertainty.quantity.value[k][q]*gain)**2,0.6*(obs_readnoise**2+(master_dark.uncertainty.quantity.value[k][q]*gain*exptime_ratio)**2+(master_bias.uncertainty.quantity.value[k][q]*gain)**2)])))**0.5##rescale variances using empirically-determined fudges that hold when readnoise ~ 2.5 electrons (via S. Koposov, private comm. May 2020)
#                        poop2=(np.max(np.array([gain_corrected2.data[k][q]+2.+obs_readnoise**2,0.6*(obs_readnoise**2)])))**0.5##rescale variances using empirically-determined fudges that hold when readnoise ~ 2.5 electrons (via S. Koposov, private comm. May 2020)
#                        print(poop1/poop2)
#                cr_cleaned=ccdproc.cosmicray_lacosmic(gain_corrected,sigclip=10)
#                bad=np.where(gain_corrected.data<0.)
#                bad=np.where(gain_corrected._uncertainty.quantity.value!=gain_corrected._uncertainty.quantity.value)#bad variances due to negative counts after overscan/bias/dark correction
#                gain_corrected.uncertainty.quantity.value[bad]=obs_readnoise
                if chip=='c1':
                    c1_reduce=gain_corrected2
                if chip=='c2':
                    c2_reduce=gain_corrected2
                if chip=='c3':
                    c3_reduce=gain_corrected2
                if chip=='c4':
                    c4_reduce=gain_corrected2
            # Stitch the four quadrants: c1 over a vertically-flipped c4
            # makes the left half; mirrored c2/c3 make the right half.
            left_data=np.concatenate((c1_reduce,np.flipud(c4_reduce)),axis=0)#left half of stitched image
            left_uncertainty=np.concatenate((c1_reduce.uncertainty._array,np.flipud(c4_reduce.uncertainty._array)),axis=0)
            left_mask=np.concatenate((c1_reduce.mask,np.flipud(c4_reduce.mask)),axis=0)
            right_data=np.concatenate((np.fliplr(c2_reduce),np.fliplr(np.flipud(c3_reduce))),axis=0)#right half of stitched image
            right_uncertainty=np.concatenate((np.fliplr(c2_reduce.uncertainty._array),np.fliplr(np.flipud(c3_reduce.uncertainty._array))),axis=0)
            right_mask=np.concatenate((np.fliplr(c2_reduce.mask),np.fliplr(np.flipud(c3_reduce.mask))),axis=0)
            stitched_data=np.concatenate((left_data,right_data),axis=1)
            stitched_uncertainty=np.concatenate((left_uncertainty,right_uncertainty),axis=1)
            stitched_mask=np.concatenate((left_mask,right_mask),axis=1)
            stitched=astropy.nddata.CCDData(stitched_data,unit=u.electron,uncertainty=StdDevUncertainty(stitched_uncertainty),mask=stitched_mask)
#            bad=np.where(stitched_uncertainty!=stitched_uncertainty)#bad variances due to negative counts after overscan/bias/dark correction
#            stitched_mask[bad]=True
#            stitched_uncertainty[bad]=1.e+10
#            stitched.uncertainty=stitched_uncertainty
#            stitched.mask=stitched_mask
#            stitched.mask[bad]=True
            stitched.header=c1_reduce.header
            stitched.write(out,overwrite=True)
# --- fibermap frames: same reduction and stitching as above, but without
# the master-dark term in the variance fudge ------------------------------
for i in range(0,len(fibermap_utdate)):
    for j in mapfile0[i]:
        for ccd in (['b','r']):
            out=datadir+fibermap_utdate[i]+'/'+ccd+str(j).zfill(4)+'_stitched.fits'
            for chip in (['c1','c2','c3','c4']):
                master_bias=astropy.nddata.CCDData.read(directory+m2fsrun+'_'+ccd+'_'+chip+'_master_bias.fits')
                # np.float was removed in numpy 1.24; use the builtin.
                obs_readnoise=float(master_bias.header['obs_rdnoise'])
                master_dark=astropy.nddata.CCDData.read(directory+ccd+'_'+chip+'_master_dark.fits')
                filename=datadir+fibermap_utdate[i]+'/'+ccd+str(j).zfill(4)+chip+'.fits'
                data=astropy.nddata.CCDData.read(filename,unit=u.adu)#header is in data.meta
                print(filename,data.header['object'],data.header['binning'])
                oscan_subtracted=ccdproc.subtract_overscan(data,overscan=data[:,1024:],overscan_axis=1,model=models.Polynomial1D(3),add_keyword={'oscan_corr':'Done'})
                trimmed1=ccdproc.trim_image(oscan_subtracted[:,:1024],add_keyword={'trim1':'Done'})
                trimmed2=ccdproc.trim_image(trimmed1[:1028,:1024],add_keyword={'trim2':'Done'})
                debiased0=ccdproc.subtract_bias(trimmed2,master_bias)
                dedark0=ccdproc.subtract_dark(debiased0,master_dark,exposure_time='exptime',exposure_unit=u.second,scale=True,add_keyword={'dark_corr':'Done'})
                data_with_deviation=ccdproc.create_deviation(dedark0,gain=data.meta['egain']*u.electron/u.adu,readnoise=obs_readnoise*u.electron)
                gain_corrected=ccdproc.gain_correct(data_with_deviation,data_with_deviation.meta['egain']*u.electron/u.adu,add_keyword={'gain_corr':'Done'})
                gain_corrected2=deepcopy(gain_corrected)
                for k in range(0,len(gain_corrected2.data)):
                    for q in range(0,len(gain_corrected2.data[k])):
                        gain_corrected2.uncertainty.quantity.value[k][q]=(np.max(np.array([gain_corrected2.data[k][q]+2.+obs_readnoise**2,0.6*obs_readnoise**2])))**0.5##rescale variances using empirically-determined fudges that hold when readnoise ~ 2.5 electrons (via S. Koposov, private comm. May 2020)
#                cr_cleaned=ccdproc.cosmicray_lacosmic(gain_corrected,sigclip=10)
#                bad=np.where(gain_corrected.data<0.)
#                bad=np.where(gain_corrected._uncertainty.quantity.value!=gain_corrected._uncertainty.quantity.value)#bad variances due to negative counts after overscan/bias/dark correction
#                gain_corrected.uncertainty.quantity.value[bad]=obs_readnoise
                if chip=='c1':
                    c1_reduce=gain_corrected2
                if chip=='c2':
                    c2_reduce=gain_corrected2
                if chip=='c3':
                    c3_reduce=gain_corrected2
                if chip=='c4':
                    c4_reduce=gain_corrected2
            # Same quadrant layout as the science loop above.
            left_data=np.concatenate((c1_reduce,np.flipud(c4_reduce)),axis=0)#left half of stitched image
            left_uncertainty=np.concatenate((c1_reduce.uncertainty._array,np.flipud(c4_reduce.uncertainty._array)),axis=0)
            left_mask=np.concatenate((c1_reduce.mask,np.flipud(c4_reduce.mask)),axis=0)
            right_data=np.concatenate((np.fliplr(c2_reduce),np.fliplr(np.flipud(c3_reduce))),axis=0)#right half of stitched image
            right_uncertainty=np.concatenate((np.fliplr(c2_reduce.uncertainty._array),np.fliplr(np.flipud(c3_reduce.uncertainty._array))),axis=0)
            right_mask=np.concatenate((np.fliplr(c2_reduce.mask),np.fliplr(np.flipud(c3_reduce.mask))),axis=0)
            stitched_data=np.concatenate((left_data,right_data),axis=1)
            stitched_uncertainty=np.concatenate((left_uncertainty,right_uncertainty),axis=1)
            stitched_mask=np.concatenate((left_mask,right_mask),axis=1)
            stitched=astropy.nddata.CCDData(stitched_data,unit=u.electron,uncertainty=StdDevUncertainty(stitched_uncertainty),mask=stitched_mask)
#            bad=np.where(stitched_uncertainty!=stitched_uncertainty)#bad variances due to negative counts after overscan/bias/dark correction
#            stitched_mask[bad]=True
#            stitched_uncertainty[bad]=1.e+10
#            stitched.uncertainty=stitched_uncertainty
#            stitched.mask=stitched_mask
#            stitched.mask[bad]=True
            stitched.header=c1_reduce.header
            stitched.write(out,overwrite=True)
987,999 | 0c7efeefb6581f8f073cd689723d8757ae4a7a9d | from django.conf import settings
from django.conf.urls import url
from django.contrib import admin
from django.urls import include, path
from rest_framework_jwt.views import refresh_jwt_token, obtain_jwt_token
from .routers import urlpatterns as api_urlpatterns
from rest_framework import permissions
# URL routing: versioned API, Django admin, and rest_auth/JWT endpoints.
urlpatterns = [
    path("api/v1/", include(api_urlpatterns)),
    path(settings.ADMIN_URL, admin.site.urls),
    path("api/v1/auth/", include('rest_auth.urls')),
    path("api/v1/auth/registration/", include('rest_auth.registration.urls')),
    path("api/v1/auth/refresh_token/", refresh_jwt_token),
    path("api/v1/auth/obtain_token/", obtain_jwt_token),
]
# Swagger/ReDoc schema browsers are exposed only in DEBUG builds.
if settings.DEBUG:
    from drf_yasg.views import get_schema_view
    from drf_yasg import openapi
    schema_view = get_schema_view(
        openapi.Info(
            title="API",
            default_version='v0.1',
        ),
        public=True,
        permission_classes=(permissions.AllowAny,),
    )
    urlpatterns += [
        url(r'^swagger(?P<format>\.json|\.yaml)$', schema_view.without_ui(cache_timeout=0), name='schema-json'),
        url(r'^swagger/$', schema_view.with_ui('swagger', cache_timeout=0), name='schema-swagger-ui'),
        url(r'^redoc/$', schema_view.with_ui('redoc', cache_timeout=0), name='schema-redoc'),
    ]
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.