text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
"""
Created on Sat May 29 19:22:47 2021
@author: rashe
"""
import pandas as pd
import numpy as np
def Get_Mahalanobis(dataframe):
    """Compute the Mahalanobis distance of every row to the column means.

    Parameters
    ----------
    dataframe : pd.DataFrame
        Numeric data; one observation per row.

    Returns
    -------
    (np.ndarray, list)
        Per-row Mahalanobis distances and the list of feature names that
        were actually used (near-constant columns may be dropped).

    Notes
    -----
    Uses the sample covariance (np.cov default, ddof=1). Requires the
    covariance matrix to be invertible.
    """
    dataframe = dataframe.reset_index(drop=True)
    # Wide frames: drop (near-)constant columns (<= 2 distinct values),
    # which carry almost no variance and destabilize the covariance inverse.
    if dataframe.shape[1] >= 15:
        nunique = dataframe.apply(pd.Series.nunique)
        cols_to_drop = nunique[nunique <= 2].index
        dataframe = dataframe.drop(cols_to_drop, axis=1)
    features = list(dataframe)
    inv_cov = np.linalg.inv(np.cov(dataframe.T))
    # Vectorized replacement for the original per-row Python loop:
    # d_i = sqrt((x_i - mu)^T S^-1 (x_i - mu))
    values = dataframe.values.astype(float)
    centered = values - values.mean(axis=0)
    Mahalanobis = np.sqrt(np.einsum('ij,jk,ik->i', centered, inv_cov, centered))
    return (Mahalanobis, features)
#!/usr/local/bin/python3
"""Print the current weather for a city using the OpenWeatherMap API."""
import requests, json

# SECURITY NOTE: a hard-coded API key should live in an environment variable
# or a config file, not in source control.
api_key = "9ba6fc1b788596955f9cda5396fb080a"
base_url = "https://api.openweathermap.org/data/2.5/weather?"
# Change this to be your city (an OpenWeatherMap city id)
city = "4668054"
URL = base_url + "id=" + city + "&appid=" + api_key + "&units=imperial"
response = requests.get(URL)
if response.status_code == 200:
    data = response.json()
    main = data['main']
    temp = main['temp']
    # Renamed from `max`/`min`: those shadowed the Python builtins.
    temp_max = main['temp_max']
    temp_min = main['temp_min']
    feels = main['feels_like']
    humidity = main['humidity']
    pressure = main['pressure']
    report = data['weather']
    print(f"{city:-^30}")
    print(f"Current Temperature: {temp}")
    print(f"Feels Like: {feels}")
    print(f"Max Temperature: {temp_max}")
    # BUGFIX: this line previously printed the label "Current Temperature"
    # for the minimum value.
    print(f"Min Temperature: {temp_min}")
    print(f"Humidity: {humidity}")
    print(f"Pressure: {pressure}")
    print(f"Weather Report: {report[0]['description']}")
else:
    print(f"Error Accessing Site Status code: {response.status_code}")
import random
from collections import defaultdict
from ..evaluation import Scoresheet
from ..util import all_pairs
from .base import Predictor
__all__ = ["Community", "Copy", "Random"]
class Community(Predictor):
    """Link predictor scoring node pairs by shared community membership."""

    def predict(self):  # pylint:disable=E0202
        """Predict using community structure

        If two nodes belong to the same community, they are predicted to form
        a link. This uses the Louvain algorithm, which determines communities
        at different granularity levels: the finer grained the community, the
        higher the resulting score.

        This needs the python-louvain package. Install linkpred as follows:

            $ pip install linkpred[community]
        """
        try:
            # Imported lazily so linkpred works without the optional extra.
            import community
        except ImportError as err:
            msg = (
                "Module 'community' could not be found. Please install linkpred with: "
                "$ pip install linkpred[community]"
            )
            raise ImportError(msg) from err
        res = Scoresheet()
        dendogram = community.generate_dendrogram(self.G)
        for i in range(len(dendogram)):
            partition = community.partition_at_level(dendogram, i)
            # Invert {node: community} to {community: [nodes]}.
            communities = defaultdict(list)
            weight = len(dendogram) - i  # Lower i, smaller communities
            for n, com in partition.items():
                communities[com].append(n)
            # Every eligible pair inside the same community accumulates the
            # level weight; pairs sharing fine-grained communities score more.
            for nodes in communities.values():
                for u, v in all_pairs(nodes):
                    if not self.eligible(u, v):
                        continue
                    res[(u, v)] += weight
        return res
class Copy(Predictor):
    """Baseline predictor that simply returns the training network's edges."""

    def predict(self, weight=None):  # pylint:disable=E0202
        """Predict by copying the training network

        If weights are used, the likelihood score is equal to the link weight.

        This predictor is mostly intended as a sort of baseline. By definition,
        it only yields predictions if we do not exclude links from the training
        network (with `excluded`).

        Parameters
        ----------
        weight : None or string, optional
            If None, all edge weights are considered equal.
            Otherwise holds the name of the edge attribute used as weight.
        """
        if weight is None:
            # Unweighted: every existing edge gets the same score of 1.
            return Scoresheet.fromkeys(self.G.edges(), 1)
        # Weighted: score each edge by its `weight` edge attribute.
        return Scoresheet(((u, v), d[weight]) for u, v, d in self.G.edges(data=True))
class Random(Predictor):
    """Baseline predictor assigning a uniform random score to every pair."""

    def predict(self):  # pylint:disable=E0202
        """Predict randomly

        Every eligible node pair receives a score drawn uniformly from
        [0, 1). This predictor can be used as a baseline.
        """
        sheet = Scoresheet()
        for u, v in all_pairs(self.eligible_nodes()):
            sheet[(u, v)] = random.random()
        return sheet
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# In Debian, install `apt install python-crypto`
__all__ = ['AESCipher', 'RSACipher', 'Checksum']

# Standard library
import hashlib  # required by Checksum (was missing -> NameError)
from base64 import b64encode, b64decode
from functools import reduce  # builtin on Py2, moved to functools on Py3

# Third-party (pycrypto): in Debian, install `apt install python-crypto`
from Crypto import Random
from Crypto.Cipher import AES
try:
import gmpy2
_bitLength = gmpy2.bit_length
_divMod = gmpy2.f_divmod
_extendedGCD = gmpy2.gcdext
_gcd = gmpy2.gcd
_iRoot = gmpy2.iroot
_iSqrt = gmpy2.isqrt
def _perfectSqrt(x):
s, r = gmpy2.isqrt_rem(x)
return s if r == 0 else -1
# f_mod(x, y int) -- The remainder will have the same sign as y
_mod = gmpy2.f_mod
_modInverse = gmpy2.invert
_mulProduct = gmpy2.mul
_mpz = gmpy2.mpz
except ImportError:
# if True:
try:
int.bit_length(1)
def _bitLength(x):
return x.bit_length()
except AttributeError:
def _bitLength(x):
'''
Calculates the bit length of x
'''
assert x >= 0
n = 0
while x > 0:
n += 1
x >>= 1
return n
def _divMod(x, y):
'''_divMod(x, y) -> (q, r)
Returns the quotient and remainder of x divided by y.
The quotient is floor rounding and the remainder will have the same sign as y.
x and y must be integers.
'''
return divmod(x, y)
def _extendedGCD(a, b):
'''_extendedGCD(a, b) -> (g, s, t)
Returns (g, s, t) and such that a*s + b*t = g and g = gcd(a,b)
'''
(s, old_s) = (0, 1)
(t, old_t) = (1, 0)
(r, old_r) = (b, a)
while r != 0:
(quotient, mod) = _divMod(old_r, r)
(old_r, r) = (r, mod)
(old_s, s) = (s, old_s - quotient*s)
(old_t, t) = (t, old_t - quotient*t)
g, s, t = old_r, old_s, old_t
return (g, s, t)
def _gcd(a,b):
from fractions import gcd as greatest_common_divisor
return greatest_common_divisor(a, b)
def _iRoot(x, n):
lo, hi = -1, (x + 1)
while (lo + 1) < hi:
y = (lo + hi) // 2
p = y**n
if p < x:
lo = y
else:
hi = y
exact = ((hi**n) == x)
y = hi if exact else lo
return (y, exact)
def _iSqrt(n):
'''Returns the integer square root of n (int) and n >= 0.'''
if n < 0:
raise ValueError('Negative numbers: n = %d'%(n))
elif n == 0:
return 0
a,b= _divMod(_bitLength(n), 2)
x = 2**(a+b)
y = (x + n//x)//2
while True:
y = (x + n//x)//2
if y >= x:
return x
x = y
return x
def _perfectSqrt(x):
'''
Returns s if s*s = x else -1
'''
last_hexdigit = x & 0xf
notPerfectList = [2, 3, 5, 6, 7, 8, 10, 11, 12, 13, 14, 15]
if (last_hexdigit in notPerfectList):
return - 1
s = _iSqrt(x)
return s if s*s == x else -1
def _modInverse(a, m):
'''Returns x (int) such that a*x = 1 (mod m)'''
(g, x, _) = _extendedGCD(a, m)
return (x % m) if g == 1 else 0
def _mulProduct(a, b):
return a*b
def _mpz(n=0): return n
def _mod(a,b): return a%b
class RSACipher(object):
    """Mostly stuffs for RSA: continued fractions, CRT, integer roots, and
    the Hastad broadcast / Wiener continued-fraction attacks on weak keys."""

    @staticmethod
    def continued_fraction(n, m=1):
        '''
        Returns continued fraction `pquotients' in form (a0, a1, .., a_n)
        of a rational p/q
        Example:
        >>> continued_fraction(45, 16)
        (2, 1, 4, 3)
        >>>
        Algorithm:
        Using GCD algorithm
        n = q × m + r
        =====================
        45 = 2 × 16 + 13
        16 = 1 × 13 + 3
        13 = 4 × 3 + 1
        3 = 3 × 1 + 0
        '''
        q, r = _divMod(n, m)
        partial_quotients = [q]
        while r != 0:
            # Euclidean step: each quotient becomes the next partial quotient.
            n, m = m, r
            q, r = _divMod(n, m)
            partial_quotients.append(q)
        return tuple(partial_quotients)

    @staticmethod
    def continued_fraction_convergents(pquotients):
        '''
        Returns iterator to list of convergents (rational approximations)
        of the continued fraction in form of (n, m), equivalent with n/m
        Note:
        + Even-numbered convergents are smaller than the original number,
          while odd-numbered ones are bigger.
        Example:
        >>> pquotients = continued_fraction(73, 27)
        >>> print(pquotients)
        (2, 1, 2, 2, 1, 2)
        >>> c = continued_fraction_convergents(pquotients)
        >>> print(list(c))
        [(2, 1), (3, 1), (8, 3), (19, 7), (27, 10), (73, 27)]
        >>>
        Reference:
        + https://oeis.org/wiki/Continued_fractions
        '''
        if len(pquotients) == 0:
            yield (0, 1)
        else:
            # Seed values for the standard convergent recurrence.
            p_2, q_2 = 0, 1
            p_1, q_1 = 1, 0
            for a_i in pquotients:
                p = a_i * p_1 + p_2  # p[i] = a[i]*p[i-1] + p[i-2]
                q = a_i * q_1 + q_2  # q[i] = a[i]*q[i-1] + q[i-2]
                p_2, p_1 = p_1, p
                q_2, q_1 = q_1, q
                c = (p, q)  # c_i = p_i / q_i, i>=0
                yield c

    @staticmethod
    def gcd(a, b):
        '''Returns the greatest common divisor of a and b.'''
        return _gcd(a, b)

    @staticmethod
    def extended_gcd(a, b):
        '''Returns (g, x, y) such that a*x + b*y = g and g = gcd(a,b)'''
        return _extendedGCD(a, b)

    @staticmethod
    def mod_inverse(a, m):
        '''Returns x (int) such that a*x = 1 (mod m)'''
        x = _modInverse(a, m)
        if x == 0:
            # _modInverse returns 0 when gcd(a, m) != 1 (no inverse exists).
            raise ValueError('No x such that %d*x = 1 (mod %d)' % (a, m))
        return x

    @staticmethod
    def chinese_remainder(n, a):
        '''
        Returns x (int) such that
            x = a_i (mod n_i)  for i := 1 -> k
        The moduli n_i must be pairwise coprime.
        Reference: https://rosettacode.org/wiki/Chinese_remainder_theorem
        '''
        # NOTE: `reduce` is a builtin on Python 2 but must come from
        # functools on Python 3 (see the module imports).
        prod = reduce(_mulProduct, n)  # reduce is faster than equivalent for loop
        total = _mpz(0)
        for (n_i, a_i) in zip(n, a):
            p = prod // n_i
            total += a_i * RSACipher.mod_inverse(p, n_i) * p
        return _mod(total, prod)

    @staticmethod
    def iroot(x, n):
        '''
        Returns (y, exact) (int, bool) such that y**n = x
        @param n: (int) > 0
        @param x: (int) >= 0
        '''
        return _iRoot(x, n)

    @staticmethod
    def hastad_broadcast_attack(N, C):
        '''
        Returns plain text m in form long type such that e=len(N)=len(C) and
        e is small and we knew `e' pairs module n, ciphertext c
        In short, returns m if
            c_i = (m**k) (mod n_i)  for i: 1->k
        With chinese remainder theorem:
            c' = (m**k) (mod n_1*n_2*..*n_k)  for i: 1->k
        '''
        e = len(N)
        assert(e == len(C))
        remainder = RSACipher.chinese_remainder(N, C)
        # Sanity check: the CRT solution must reproduce each ciphertext.
        for (n, c) in zip(N, C):
            assert(_mod(remainder, n) == c)
        # m**e < n_1*...*n_e when m < min(n_i), so the e-th root is exact.
        m, exact = RSACipher.iroot(remainder, e)
        assert(exact)
        return m

    @staticmethod
    def wiener_attack(e, n):
        '''
        Returns d knowing (e, n) applying the Wiener continued fraction attack
        (or -1 when the key is not Wiener-vulnerable).
        -------------------------------
        RSA-keys are Wiener-vulnerable if d < (n^(1/4))/sqrt(6)
        The RSA keys are obtained as follows:
        1. Choose two prime numbers p and q
        2. Compute n=p*q
        3. Compute phi(n)=(p-1)*(q-1)
        4. Choose e such that 1 < e < phi(n); e and phi(n) are coprime
        5. Compute d = e^(-1) (mod phi(n))
        6. e is the public key;
           n is also made public (determines the block size);
           d is the private key
        Encryption is as follows:
        1. Size of data to be encrypted must be less than n
        2. ciphertext=pow(plaintext, e, n)
        Decryption is as follows:
        1. Size of data to be decrypted must be less than n
        2. plaintext=pow(ciphertext, d, n)
        '''
        frac = RSACipher.continued_fraction(e, n)
        convergents = RSACipher.continued_fraction_convergents(frac)
        for (k, d) in convergents:
            # check if d is actually the key
            if (k != 0) and ((e * d - 1) % k == 0):
                phi = (e * d - 1) // k
                s = n - phi + 1
                # check if the equation x^2 - s*x + n = 0
                # has integer roots (x would be p and q)
                discr = s * s - 4 * n
                if discr >= 0:
                    t = _perfectSqrt(discr)
                    if (t != -1) and ((s + t) % 2 == 0):
                        return d
        return -1
class AESCipher(object):
    '''
    Thin wrapper around pycrypto's AES in ECB or CBC mode with PKCS7
    padding and base64-encoded output.
    NOTE(review): ECB mode leaks plaintext structure and should be avoided
    for anything but toy use.
    Reference:
    + http://pythonhosted.org/pycrypto/
    '''
    MODE_ECB = AES.MODE_ECB
    MODE_CBC = AES.MODE_CBC

    def __init__(self, key, mode=AES.MODE_ECB):
        # key (byte string) - The secret key to use in the symmetric cipher.
        # It must be 16 (AES-128), 24 (AES-192), or 32 (AES-256) bytes long.
        # key must be hash by sha256, md5 before pass to this class
        # Why hash key? To len(key) in AES.key_size
        assert mode in (AES.MODE_ECB, AES.MODE_CBC)
        assert len(key) in AES.key_size
        self.key = key
        self.bs = AES.block_size
        self.mode = mode

    def __repr__(self):
        return "AESCipher(key=%r, mode=%r)" % (self.key, self.mode)

    def encrypt(self, raw):
        """Encrypt using AES in CBC or ECB mode.

        Returns base64(iv + ciphertext); the iv is empty for ECB.
        """
        raw = self.pad(raw)
        # A fresh random IV per message for CBC; ECB takes no IV.
        # NOTE(review): the empty-string IV assumes Python 2 str semantics;
        # on Python 3 this would need b'' -- confirm target interpreter.
        iv = (Random.new().read(self.bs) if (self.mode == AES.MODE_CBC)
              else '')
        aes = AES.new(key=self.key, mode=self.mode, IV=iv)
        return b64encode(iv + aes.encrypt(raw))

    def decrypt(self, enc):
        """Decrypt using AES in CBC mode. Expects the IV at the front of the string."""
        enc = b64decode(enc)
        if self.mode == AES.MODE_CBC:
            # The first block of the payload is the IV written by encrypt().
            iv = enc[:self.bs]
            enc = enc[self.bs:]
        else:
            iv = ''
        aes = AES.new(key=self.key, mode=self.mode, IV=iv)
        dec = aes.decrypt(enc)
        return self.unpad(dec)

    def unpad(self, text):
        """PKCS7 unpad (lenient: returns input unchanged if padding looks invalid)."""
        last_byte = ord(text[-1:])
        if last_byte > self.bs:
            return text
        if text[-last_byte:] != chr(last_byte) * last_byte:
            return text
        return text[:-last_byte]

    def pad(self, text):
        """PKCS7 pad: always appends 1..bs bytes, each equal to the pad length."""
        pad_num = self.bs - len(text) % self.bs
        return text + chr(pad_num) * pad_num
class Checksum():
    """File checksum helpers that stream the file through hashlib in blocks,
    so large files never have to be loaded into memory at once.
    NOTE: relies on a module-level ``import hashlib``."""

    @staticmethod
    def _hashlib_wrapper(method, filename, block_size):
        """Feed *filename* through hash constructor *method* block by block
        and return the hexadecimal digest string."""
        digest = method()
        with open(filename, 'rb') as handle:
            block = handle.read(block_size)
            while block:
                digest.update(block)
                block = handle.read(block_size)
        return digest.hexdigest()

    @staticmethod
    def sha256sum(filename, block_size=65536):
        '''Return sha256 sum of a file
        Efficency when works with many file
        '''
        return Checksum._hashlib_wrapper(hashlib.sha256, filename, block_size)

    @staticmethod
    def sha1sum(filename, block_size=65536):
        '''Return sha1 sum of a file'''
        return Checksum._hashlib_wrapper(hashlib.sha1, filename, block_size)

    @staticmethod
    def md5sum(filename, block_size=65536):
        '''Return md5 sum of a file'''
        return Checksum._hashlib_wrapper(hashlib.md5, filename, block_size)
|
import ROOT
ROOT.gSystem.Load("RooUnfold/libRooUnfold")
from ROOT import gRandom, TH1, cout, TH2, TLegend, TFile
from ROOT import RooUnfoldResponse
from ROOT import RooUnfold
from ROOT import RooUnfoldBayes
from ROOT import TCanvas
from ROOT import RooUnfoldSvd
from optparse import OptionParser
# Command-line options: a systematic-variation suffix for histogram names
# and a (currently unused here) pythia6 flag.
# NOTE: this script uses xrange, i.e. it targets Python 2.
parser = OptionParser()
parser.add_option('--extension', action ='store', type = 'string',
                  default ='',
                  dest='extension',
                  help='Runs jec, correct options are _jecup : _jecdn : _jerup : _jerdn : or nothing at all to get the nominal')
parser.add_option('--pythia6', action ='store_true', default=False, dest='pythia6')
(options, args) = parser.parse_args()
# Input file holding the 2D response matrices and reco/gen spectra (QCD MC).
myfile = TFile('qcdmc_stitched_qcdmc.root')
pythia6 = None
outtext = ''
outfile = None
ROOT.gStyle.SetOptStat(000000)
# Response matrices and matching truth/reco histograms, both for the
# ungroomed jet mass and for the soft-drop (SD) groomed mass.
response = myfile.Get('2d_response' + options.extension )
outtext = options.extension
truth = myfile.Get('PFJet_pt_m_AK8Gen')
reco = myfile.Get('PFJet_pt_m_AK8')
responseSD = myfile.Get('2d_response_softdrop' + options.extension )
truthSD = myfile.Get('PFJet_pt_m_AK8SDgen')
recoSD = myfile.Get('PFJet_pt_m_AK8SD')
response.Draw('colz')
# Normalize all spectra to unit area so the closure test compares shapes.
truth.Scale(1./truth.Integral())
reco.Scale(1./reco.Integral())
truthSD.Scale(1./truthSD.Integral())
recoSD.Scale(1./recoSD.Integral())
# Jet-pT bin labels (GeV) used in plot titles and output file names.
pt_bin = {0: '200-240', 1: '240-310', 2: '310-400', 3: '400-530', 4: '530-650', 5: '650-760', 6: '760-Inf'}
# Iterative (D'Agostini) Bayesian unfolding, 6 iterations.
unfold = RooUnfoldBayes(response, reco, 6)
unfoldSD = RooUnfoldBayes(responseSD, recoSD, 6)
#unfold= RooUnfoldSvd(response, reco, 5);
reco_unfolded = unfold.Hreco()
reco_unfoldedSD = unfoldSD.Hreco()
################### New Correlation matrix stuff
# Convert the unfolding covariance matrices into correlation matrices:
# cor[i][j] = cov[i][j] / sqrt(cov[i][i]*cov[j][j]).
cov = unfold.Ereco()
covSD = unfoldSD.Ereco()
nb= cov.GetNrows()
import math
cor = ROOT.TH2F("cor", "", nb, 0, nb, nb, 0, nb)
corSD = ROOT.TH2F("corSD", "", nb, 0, nb, nb, 0, nb)
for i in xrange(0,nb) :
    for j in xrange(0,nb) :
        Viijj = cov[i][i] * cov[j][j]
        # Guard against zero variance on the diagonal (empty bins).
        if Viijj>0.0 :
            cor.SetBinContent(i+1, j+1, cov[i][j]/math.sqrt(Viijj))
for i in xrange(0,nb) :
    for j in xrange(0,nb) :
        Viijj = covSD[i][i] * covSD[j][j]
        if Viijj>0.0 :
            corSD.SetBinContent(i+1,j+1, covSD[i][j]/math.sqrt(Viijj) )
# Draw and save the correlation matrices for both mass definitions.
cov_canvas=TCanvas("cov canvas", "cov canvas")
cov_canvas.cd()
cor.SetMinimum(-1.0)
cor.SetMaximum(1.0)
cor.Draw("colz")
cov_canvas.Update()
cov_canvas.Print("CovarianceMatrix.png", "png")
cov_canvas.Print("CovarianceMatrix.pdf", "pdf")
covSD_canvas=TCanvas("covSDcanvas", "covSDcanvas")
covSD_canvas.cd()
corSD.SetMinimum(-1.0)
corSD.SetMaximum(1.0)
corSD.Draw("colz")
covSD_canvas.Update()
covSD_canvas.Print("CovarianceMatrixSD.png", "png")
covSD_canvas.Print("CovarianceMatrixSD.pdf", "pdf")
###################
# Overlay the unfolded reco spectra with the generator truth.
creco = TCanvas("creco", "creco")
reco_unfolded.Draw()
c2=TCanvas()
c2.cd()
reco_unfoldedSD.Draw()
truth.SetLineColor(4)
truth.Draw('SAME')
# Per-pT-bin bookkeeping; keepHists prevents ROOT projections from being
# garbage collected while the canvases are alive.
canvases = []
namesreco = []
namesgen = []
legends = []
canvasesSD = []
legendsSD = []
namesrecoSD = []
namesgenSD = []
keepHists = []
for i in range(0, 7):
    namesreco.append(None)
    namesgen.append(None)
    legends.append(TLegend(.7, .5, .9, .7))
    canvases.append(TCanvas())
    canvasesSD.append(TCanvas())
    legendsSD.append(TLegend(.7, .5, .9, .7))
    namesrecoSD.append(None)
    namesgenSD.append(None)
# For each pT bin: project the 2D (pT, mass) histograms onto the mass axis
# and compare the unfolded reco projection with the generator-level one.
for i, canvas in enumerate(canvases) :
    canvas.cd()
    ihist = namesreco[i] = reco_unfolded.ProjectionY('pythia8_mass' + str(i), i+1, i+1)
    keepHists.append( ihist )
    namesreco[i].SetTitle('Mass Projection for P_{T} ' + pt_bin[i] + ' GeV')
    namesreco[i].Draw('hist')
    ihist = namesgen[i] = truth.ProjectionY('genmass' + str(i), i+1 , i+1)
    keepHists.append( ihist)
    namesgen[i].SetLineColor(2)
    namesgen[i].Draw('same hist')
    legends[i].AddEntry(namesreco[i], 'Reco', 'l')
    legends[i].AddEntry(namesgen[i], 'Gen', 'l')
    legends[i].Draw()
    canvas.SaveAs('unfolded_closure_preplotter_'+pt_bin[i] + options.extension + '.png')
# Same closure plots for the soft-drop mass.
for i, canvas in enumerate(canvasesSD):
    canvas.cd()
    ihist = namesrecoSD[i] = reco_unfoldedSD.ProjectionY('pythia8_massSD' + str(i), i+1, i+1)
    keepHists.append(ihist)
    namesrecoSD[i].SetTitle('SD Mass Projection for P_{T} ' + pt_bin[i] + ' GeV')
    namesrecoSD[i].Draw('hist')
    ihist = namesgenSD[i] = truthSD.ProjectionY('genmassSD' + str(i), i+1, i+1)
    keepHists.append(ihist)
    namesgenSD[i].SetLineColor(2)
    namesgenSD[i].Draw('same hist')
    legendsSD[i].AddEntry(namesrecoSD[i], 'SD Reco', 'l')
    legendsSD[i].AddEntry(namesgenSD[i], 'SD Gen', 'l')
    legendsSD[i].Draw()
    canvas.SaveAs('unfolded_closure_softdrop_preplotter_' + pt_bin[i] + options.extension + '.png')
# Persist all projections to a ROOT file for later plotting.
outfile = TFile('2DClosure' + options.extension + '.root', 'RECREATE')
outfile.cd()
for hists in namesreco:
    hists.Write()
for stuff in namesgen:
    stuff.Write()
for morestuff in namesrecoSD:
    morestuff.Write()
for evenmore in namesgenSD:
    evenmore.Write()
outfile.Write()
outfile.Close()
|
import json
import re
import httpretty
import pytest
from social.apps.django_app.default.models import DjangoStorage
from social.backends.google import GoogleOAuth2
from social.p3 import urlparse
from social.strategies.django_strategy import DjangoStrategy
from social.utils import parse_qs
def handle_state(backend, start_url, target_url):
    """Carry the OAuth ``state``/``redirect_state`` query parameter from
    *start_url* over to *target_url* for backends that use one.

    Backends without the STATE_PARAMETER/REDIRECT_STATE attributes leave
    the target URL untouched.
    """
    try:
        if backend.STATE_PARAMETER or backend.REDIRECT_STATE:
            params = parse_qs(urlparse(start_url).query)
            separator = '&' if '?' in target_url else '?'
            target_url = target_url + separator
            if 'state' in params or 'redirect_state' in params:
                key = 'state' if 'state' in params else 'redirect_state'
                target_url += '{0}={1}'.format(key, params[key])
    except AttributeError:
        pass
    return target_url
@pytest.yield_fixture
def facebook_auth():
    """Pytest fixture mocking the Facebook Graph API endpoints used by the
    OAuth2 flow (token exchange and profile lookup)."""
    httpretty.enable()

    def callback(method, uri, headers):
        # Serve the two endpoints the flow needs; any other URI raises so
        # an unexpected API call fails loudly instead of hitting the network.
        if 'graph.facebook.com/oauth/access_token' in uri:
            body = 'access_token=test_access_token&expires=5156423'
        elif 'graph.facebook.com/me' in uri:
            body = json.dumps({
                'id': '12345',
                'name': 'Foo Bar',
                'username': 'foo.bar',
                'email': 'foobar@googlemail.com'
            })
        else:
            raise Exception('API call without mocking: {0}.'.format(uri))
        return (200, headers, body)
    # One catch-all registration; routing happens inside the callback.
    httpretty.register_uri(httpretty.GET, re.compile(r'.*'), body=callback)
    yield
    httpretty.disable()
    httpretty.reset()
@pytest.yield_fixture
def twitter_auth():
    """Pytest fixture mocking Twitter's OAuth1 endpoints: request token,
    access token, and credential verification."""
    httpretty.enable()
    # Step 1: request-token endpoint response.
    request_token_body = '&'.join([
        'oauth_token=test_request_token',
        'oauth_token_secret=test_request_token_secret',
        'oauth_callback_confirmed=true'])
    httpretty.register_uri(
        httpretty.GET,
        re.compile(r'.*api\.twitter\.com/oauth/request_token'),
        body=request_token_body)
    # Step 2: access-token exchange response.
    access_token_body = '&'.join([
        'oauth_token=test_access_token',
        'oauth_token_secret=test_access_token_secret',
        'user_id=12345',
        'screen_name=pappeldackel'])
    httpretty.register_uri(
        httpretty.GET,
        re.compile(r'.*api\.twitter\.com/oauth/access_token'),
        body=access_token_body)
    # Step 3: profile data returned by verify_credentials.
    verify_credentials_body = json.dumps({
        'id': 12345,
        'name': 'Foo Bar',
        'screen_name': 'foobar',
        'notifications': False})
    httpretty.register_uri(
        httpretty.GET,
        re.compile(r'.*twitter\.com/1\.1/account/verify_credentials\.json'),
        body=verify_credentials_body)
    # Tests receive the request token they should start the flow with.
    yield {'oauth_token': 'test_request_token'}
    httpretty.disable()
    httpretty.reset()
@pytest.yield_fixture
def google_auth():
    """Pytest fixture mocking the full Google OAuth2 round trip: the auth
    redirect, token endpoint, and userinfo endpoint."""
    # TODO: This could be abstracted for twitter and facebook too.
    httpretty.enable()

    def _method(method):
        # Map the backend's HTTP verb string onto httpretty's constant.
        return {'GET': httpretty.GET,
                'POST': httpretty.POST}[method]
    # Drive a real strategy to get the exact start URL (incl. state params).
    strategy = DjangoStrategy(GoogleOAuth2, DjangoStorage)
    start_url = strategy.start().url
    target_url = handle_state(
        GoogleOAuth2,
        start_url,
        strategy.build_absolute_uri('/complete/{0}/?code=foobar')
    )
    # The provider's auth page redirects straight to the completion URL.
    httpretty.register_uri(
        httpretty.GET,
        start_url,
        status=301,
        location=target_url
    )
    httpretty.register_uri(
        httpretty.GET,
        target_url,
        status=200,
        body='foobar'
    )
    # Token exchange endpoint.
    httpretty.register_uri(
        _method(GoogleOAuth2.ACCESS_TOKEN_METHOD),
        uri=GoogleOAuth2.ACCESS_TOKEN_URL,
        status=200,
        body=json.dumps({
            'access_token': 'foobar',
            'token_type': 'bearer'}),
        content_type='text/json'
    )
    # Userinfo endpoint used to build the social user profile.
    user_data_url = 'https://www.googleapis.com/oauth2/v1/userinfo'
    if user_data_url:
        httpretty.register_uri(
            httpretty.GET,
            user_data_url,
            body=json.dumps({
                'email': 'foo@bar.com',
                'id': '101010101010101010101'}),
            content_type='application/json'
        )
    yield
    httpretty.disable()
    httpretty.reset()
|
#!/usr/bin/python
# The "#!" line above (shebang / hash-bang, informally an "environment
# provider") tells the shell which interpreter runs this file, so after
# `chmod +x` it can be started as ./filename from the terminal.
# A more portable alternative shebang is:  #!/usr/bin/env python
x = 10
y = 20
# Parenthesized print works under both Python 2 (prints the expression)
# and Python 3 (calls the print function); the original bare `print x+y`
# statements were Python-2-only syntax.
print(x + y)
print(type(x))
t = (4, 6, 78)
print(len(t))
|
from a10sdk.common.A10BaseClass import A10BaseClass
class Sip(A10BaseClass):
    """Class Description::
    Change LSN SIP ALG Settings.

    Class sip supports CRUD Operations and inherits from `common/A10BaseClass`.
    This class is the `"PARENT"` class for this module.`

    :param sip_value: {"optional": true, "enum": ["enable"], "type": "string", "description": "'enable': Enable SIP ALG for LSN; ", "format": "enum"}
    :param rtp_stun_timeout: {"description": "RTP/RTCP STUN timeout in minutes (Default is 5 minutes)", "format": "number", "default": 5, "optional": true, "maximum": 10, "minimum": 2, "type": "number"}
    :param DeviceProxy: The device proxy for REST operations and session handling. Refer to `common/device_proxy.py`

    URL for this object::
    `https://<Hostname|Ip address>//axapi/v3/cgnv6/lsn/alg/sip`.
    """

    def __init__(self, **kwargs):
        self.ERROR_MSG = ""
        self.required = []          # no mandatory attributes for this object
        self.b_key = "sip"          # API body key
        self.a10_url = "/axapi/v3/cgnv6/lsn/alg/sip"  # REST endpoint
        self.DeviceProxy = ""
        self.sip_value = ""
        self.rtp_stun_timeout = ""
        # Any keyword argument overrides the defaults above (and may add
        # attributes not declared here, as the base class expects).
        for keys, value in kwargs.items():
            setattr(self, keys, value)
|
'''
CONSOLE COMMAND: Add Return Date of Activity
REQUEST FOR CHANGE: 658
ISSUE: 1029
CMD: python scripts/add_return_doa.py | tee -a logs/rfc_0658_20210519T1000.log

Update Return Running Sheet entries to set Date of Activity to date added.
'''
import os
import sys
import django

# Bootstrap Django outside of manage.py: put the project on sys.path and
# point Django at the settings module before importing project code.
proj_path = '/app'
sys.path.append(proj_path)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wildlifecompliance.settings")
django.setup()

# Must be imported after django.setup(), otherwise app loading fails.
from wildlifecompliance.components.returns.services import ReturnService

try:
    # True -> apply the Date of Activity backfill (see service for semantics).
    ReturnService.etl_return_sheet(True)
except Exception as e:
    # Best-effort console script: report the failure to the piped log
    # rather than crashing with a traceback.
    print(e)
|
import functools
from typing import Callable, Iterable, Tuple, Union
import numpy as np
from matplotlib import animation, pyplot as plt
from matplotlib.animation import FuncAnimation
from scipy.integrate import odeint
mpl_lim = Union[float, Tuple[float, float]]  # scalar v is shorthand for the symmetric limits (-v, v)


def plot_ode(
    func: Callable,
    initial_conditions: Iterable,
    times: np.ndarray,
    system_params: dict,
    show: bool = True,
    save: str = '',
    xlim: mpl_lim = 10,
    ylim: mpl_lim = 10,
):
    """
    Integrate an ODE system and animate its (x, y) trajectory.

    Args:
        func: Function to be integrated (odeint-compatible signature).
        initial_conditions: Initial conditions for seeding integral.
        times: Times to evaluate equations of motion.
        system_params: Physical parameters of the system, forwarded to
            ``func`` as keyword arguments.
        show: Display trajectory animation.
        save: Filepath to save trajectory animation (requires ffmpeg).
        xlim: For plotting; a scalar v means (-v, v).
        ylim: For plotting; a scalar v means (-v, v).
    """
    pend_func = functools.partial(func, **system_params)
    # NOTE(review): assumes a 4-component state vector (x, x', y, y') --
    # confirm for systems other than the coupled pendulums this targets.
    x, xd, y, yd = odeint(pend_func, initial_conditions, times).T
    if isinstance(xlim, (int, float)):
        xlim = -xlim, xlim
    if isinstance(ylim, (int, float)):
        ylim = -ylim, ylim
    # Keep only the samples inside the plot window; out-of-range frames are
    # simply dropped from the animation.
    m = (x > xlim[0]) & (x < xlim[1])
    m &= (y > ylim[0]) & (y < ylim[1])
    x, y = x[m], y[m]
    fig, ax = plt.subplots()
    ln, = ax.plot([], [], 'o')

    def init():
        # Called once by FuncAnimation to set up the axes and empty artist.
        ax.set_xlim(*xlim)
        ax.set_ylim(*ylim)
        ln.set_data([], [])
        return ln,

    def update(vals):
        # vals is one (x, y) row of `frames`; move the marker there.
        ln.set_data(vals[:1], vals[1:])
        return ln,

    frames = np.stack([x, y], axis=-1)
    ani = FuncAnimation(fig, update, init_func=init, frames=frames, blit=True, interval=25, repeat=False)
    if save:
        Writer = animation.writers['ffmpeg']
        writer = Writer(fps=40, metadata=dict(artist='Me'), bitrate=1800)
        ani.save(save, writer=writer)
    if show:
        plt.show()
|
import logging
import dbus
logger = logging.getLogger(__name__)
class DbusDevice(object):
    ## The constructor processes the tree of dbus-items.
    # @param bus Session/System bus object
    # @param name the dbus-service-name.
    # @param eventCallback callable(service_name, path, changes) invoked for
    #        every value change signalled by this service.
    def __init__(self, bus, name, eventCallback):
        self._dbus_name = name
        self._dbus_conn = bus
        self._eventCallback = eventCallback
        # Resolve the unique bus id (e.g. ':1.42') so incoming signals can be
        # filtered down to this exact service instance.
        self._service_id = self._dbus_conn.get_name_owner(name)
        # Per-item change notification (one signal per changed path).
        self._dbus_conn.add_signal_receiver(self._on_dbus_value_changed,
            dbus_interface='com.victronenergy.BusItem', signal_name='PropertiesChanged', path_keyword='path',
            sender_keyword='service_id')
        # Bulk notification: a single signal carrying {path: changes}.
        self._dbus_conn.add_signal_receiver(self._on_dbus_items_changed,
            dbus_interface='com.victronenergy.BusItem',
            signal_name='ItemsChanged', path='/', sender_keyword='service_id')

    def __del__(self):
        # Drop references so callbacks cannot outlive the device wrapper.
        logger.debug('__del__ %s' % self)
        self._dbus_name = None
        self._value = None
        self._eventCallback = None

    def _on_dbus_value_changed(self, changes, path=None, service_id=None):
        # Ignore signals from other owners of the same well-known name.
        if service_id == self._service_id:
            self._eventCallback(self._dbus_name, path, changes)

    def _on_dbus_items_changed(self, items, service_id=None):
        # Fan the bulk ItemsChanged signal out into per-path callbacks.
        if service_id == self._service_id:
            for path, changes in items.items():
                self._eventCallback(self._dbus_name, path, changes)

    ## Returns the dbus-service-name which represents the Victron-device.
    def __str__(self):
        return "DbusDevice=%s" % self._dbus_name

    def getBusName(self):
        """Return the dbus service name this wrapper is bound to."""
        return self._dbus_name

    def getValues(self):
        """Fetch all item values and display texts from the service root.

        Returns a dict keyed by '/path' with 'Value', 'Valid' and 'Text'.
        """
        data = self._dbus_conn.call_blocking(self._dbus_name, '/', None, 'GetValue', '', [])
        texts = self._dbus_conn.call_blocking(self._dbus_name, '/', None, 'GetText', '', [])
        values = {}
        for p, v in data.items():
            values['/' + p] = {
                'Value': v,
                # An empty dbus array is the service's "invalid value" marker.
                'Valid': bool(v != dbus.Array([])),
                'Text': texts[p]}
        return values
|
"""
================================================================================
pypolar: Analysis of polarization using the Jones and/or the Mueller calculus
================================================================================
http://github.com/scottprahl/pypolar
Usage:
import pypolar.jones as jones
import pypolar.mueller as mueller
light = jones.field_horizontal()
print("Jones vector for horizontally-polarized light")
print(light)
light = mueller.field_left_circular()
print("Stokes vector for left circularly polarized light")
print(light)
"""
__author__ = 'Scott Prahl'
__version__ = '0.5.1'
|
# -*- coding: utf-8 -*-
import csv
import sys
from model import *
COMUNIDAD = 0
PROVINCIA = 2
MUNICIPIO = 4
POBLACION = 5
CENSOTOTAL = 7
VOTOSTOTALES = 8
VOTOSVALIDOS = 9
VOTOSCANDIDATURA = 10
VOTOSBLANCO = 11
VOTOSNULO = 12
class Provincia:
    """Accumulator for per-province totals (population and electoral census)."""

    def __init__(self):
        # Both counters start at zero; parsing code adds municipality values.
        self.poblacion = 0
        self.censoTotal = 0
def LoadFile(filepath):
    """Parse a Spanish election-results CSV and aggregate votes per province.

    The file has 6 header rows (rows 5 and 6 hold party names and acronyms),
    fixed data columns (see module constants) and per-party vote columns
    starting at index 13. Aggregated results are handed to WriteToDataStore,
    with the election year taken from the four characters before '.csv' in
    the file name.
    """
    voteReader = csv.reader(open(filepath, 'rb'), delimiter=",")

    def _num(cell):
        # SECURITY/BUGFIX: the original used eval() on file data; int() parses
        # the same '1,234'-style numbers without executing untrusted input.
        return int(cell.strip().replace(",", ""))

    # Skip the first four header rows, which carry no data.
    next(voteReader)
    next(voteReader)
    next(voteReader)
    next(voteReader)
    nombres = next(voteReader)   # row 5: full party names
    siglas = next(voteReader)    # row 6: party acronyms
    partidos = {}
    votos = {}
    # Map each party column (13 onwards) to its (name, acronym) pair.
    # BUGFIX: siglas.index(sigla) mapped duplicate acronyms to the first
    # matching column; enumerate keeps every column distinct. (`id` also
    # shadowed the builtin.)
    for col_idx, sigla in enumerate(siglas):
        if col_idx >= 13:
            partidos[col_idx] = (nombres[col_idx], sigla)
    for row in voteReader:
        # Actualizar Informacion Provincia
        comunidad = row[COMUNIDAD].strip()
        provincia = row[PROVINCIA].strip()
        municipio = row[MUNICIPIO].strip()
        poblacionMunicipio = _num(row[POBLACION])
        censoMunicipio = _num(row[CENSOTOTAL])
        totalVotosMunicipio = _num(row[VOTOSTOTALES])
        validosMunicipio = _num(row[VOTOSVALIDOS])
        candidaturasMunicipio = _num(row[VOTOSCANDIDATURA])
        blancoMunicipio = _num(row[VOTOSBLANCO])
        nulosMunicipio = _num(row[VOTOSNULO])
        if (not comunidad in votos):
            votos[comunidad] = {}
        if (not provincia in votos[comunidad]):
            votos[comunidad][provincia] = {'Poblacion': 0, 'Censo': 0, 'TotalVotos': 0, 'Validos': 0, 'Candidaturas': 0, 'Blanco': 0, 'Nulos': 0, 'Votos': {}}
        prov = votos[comunidad][provincia]
        prov['Poblacion'] += poblacionMunicipio
        prov['Censo'] += censoMunicipio
        prov['TotalVotos'] += totalVotosMunicipio
        prov['Validos'] += validosMunicipio
        prov['Candidaturas'] += candidaturasMunicipio
        prov['Blanco'] += blancoMunicipio
        prov['Nulos'] += nulosMunicipio
        # Actualizar votos partidos (per-party columns start at 13)
        for idPartido in range(13, len(row)):
            sigla = siglas[idPartido]
            votosMunicipio = int(row[idPartido].replace(",", ""))
            if (votosMunicipio > 0):
                prov['Votos'][sigla] = prov['Votos'].get(sigla, 0) + votosMunicipio
    # El ano es el mismo para todos: the 4 chars right before '.csv'.
    i = filepath.find('.csv')
    ano = filepath[i - 4:i]
    WriteToDataStore(ano, votos)
def WriteToDataStore(ano, votos):
    """Persist aggregated province results to the datastore.

    Creates one CensoElectoral entity per (comunidad, provincia) with the
    aggregate counters, plus one Escrutinio entity per party that received
    votes, linked back to its census entity.

    ano: election year string; votos: nested dict built by LoadFile.
    """
    for comunidad in votos:
        for provincia in votos[comunidad]:
            censo = CensoElectoral()
            censo.ano = ano
            censo.comunidad = comunidad
            censo.provincia = provincia
            censo.poblacion = votos[comunidad][provincia]['Poblacion']
            censo.censoTotal = votos[comunidad][provincia]['Censo']
            censo.votosTotales = votos[comunidad][provincia]['TotalVotos']
            censo.votosValidos = votos[comunidad][provincia]['Validos']
            censo.votosCandidatura = votos[comunidad][provincia]['Candidaturas']
            censo.votosBlanco = votos[comunidad][provincia]['Blanco']
            censo.votosNulo = votos[comunidad][provincia]['Nulos']
            # Save first so Escrutinio rows can reference the stored entity.
            censo.put()
            for partido in votos[comunidad][provincia]['Votos']:
                escrutinio = Escrutinio()
                escrutinio.censo = censo
                escrutinio.partido = partido
                escrutinio.votos = votos[comunidad][provincia]['Votos'][partido]
                escrutinio.put()
|
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.exporters import CsvItemExporter
from scrapy.conf import settings
#import pymongo
# class MongoProductPipeline(object):
# collection_name = 'products'
# def __init__(self):
# connection = pymongo.MongoClient(settings['MONGODB_SERVER'],
# settings['MONGODB_PORT'])
# db = connection[settings['MONGODB_DB']]
# self.collection = db[settings['MONGODB_COLLECTION']]
# def process_item(self, item, spider):
# self.collection.insert(dict(item))
# return item
class ProductPipeline(object):
    """Scrapy item pipeline exporting every scraped item to Exported.csv."""

    def __init__(self):
        # 'wb' because CsvItemExporter writes encoded bytes itself.
        self.file = open('Exported.csv', 'wb')
        self.exporter = CsvItemExporter(self.file)
        self.exporter.start_exporting()

    def close_spider(self, spider):
        # Flush the exporter and release the file handle when the crawl ends.
        self.exporter.finish_exporting()
        self.file.close()

    def process_item(self, item, spider):
        # Write the item as one CSV row; return it so later pipelines run.
        self.exporter.export_item(item)
        return item
|
from PIL import Image
from matplotlib import pyplot as plt
import os
import math
import numpy as np
from utils.dir import dir_dict
DIR = dir_dict["VOC_DIR"]
SETS = [('2012', 'train'), ('2012', 'val'), ('2007', 'train'), ('2007', 'val'), ('2007', 'test')]
CLASS_LIST = ["aeroplane", "bicycle", "bird", "boat", "bottle",
"bus", "car", "cat", "chair", "cow", "diningtable",
"dog", "horse", "motorbike", "person", "pottedplant",
"sheep", "sofa", "train", "tvmonitor"]
def get_color(c, x, max_val):
    """Interpolate channel ``c`` (0=R-ish index into the palette triplets)
    along a 6-anchor color palette and return it scaled to 0-255.

    ``x / max_val`` selects the position along the palette (0..5).
    """
    palette = [[1, 0, 1], [0, 0, 1], [0, 1, 1], [0, 1, 0], [1, 1, 0], [1, 0, 0]]
    pos = float(x) / max_val * 5
    lo = int(math.floor(pos))
    hi = int(math.ceil(pos))
    frac = pos - lo
    # Linear blend between the two neighbouring palette anchors.
    channel = (1 - frac) * palette[lo][c] + frac * palette[hi][c]
    return int(channel * 255)
def draw_bbox(ax, boxes, img_dim, class_id=None):
    """
    Draw bounding boxes (and optional class labels) on a matplotlib axes.

    boxes: (x_center, y_center, width, height), normalized to [0, 1]
    img_dim: (W, H) in pixels
    class_id: optional sequence of per-box class indices; None draws plain
        red boxes without labels
    output: plot boxes on img
    """
    import matplotlib.patches as patches  # hoisted out of the per-box loop
    for i, box in enumerate(boxes):
        # Convert normalized center/size to top-left corner in pixels.
        x1 = (box[0] - box[2] / 2.0) * img_dim[0]
        y1 = (box[1] - box[3] / 2.0) * img_dim[1]
        width = box[2] * img_dim[0]
        height = box[3] * img_dim[1]
        # BUGFIX: the original evaluated class_id[i] even when class_id was
        # None (its documented default), raising TypeError.
        if class_id is not None and class_id[i] is not None:
            # NOTE(review): palette size here is the number of boxes, not the
            # number of classes (len(CLASS_LIST)) -- confirm intent.
            classes = len(class_id)
            offset = class_id[i] * 123457 % classes
            red = get_color(2, offset, classes) / 255
            green = get_color(1, offset, classes) / 255
            blue = get_color(0, offset, classes) / 255
            color = (red, green, blue)
            ax.text(x1, y1, CLASS_LIST[int(class_id[i])], fontsize=16, color=color)
        else:
            color = 'r'
        rect = patches.Rectangle((x1, y1), width, height, linewidth=2, edgecolor=color, facecolor='none')
        ax.add_patch(rect)
def filter_zero_from_labels(labels):
    """Return an array keeping only the rows of `labels` whose element sum is non-zero."""
    kept = [row for row in labels if np.sum(row) != 0]
    return np.asarray(kept)
def load_class_names(namesfile):
    """Read one class name per line from `namesfile` and return them as a list.

    Trailing whitespace (including the newline) is stripped from each line.
    """
    with open(namesfile, 'r') as fp:
        return [raw.rstrip() for raw in fp.readlines()]
import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter('ignore')
import json
import pickle
import os
import tensorflow as tf
from ._utils._utils import check_file, load_graph
from . import home
from ._utils._paths import PATH_TOXIC, S3_PATH_TOXIC
from ._models._sklearn_model import TOXIC
from ._models._tensorflow_model import SIGMOID
def available_deep_model():
    """
    List available deep learning toxicity analysis models.
    """
    supported = ('bahdanau', 'hierarchical', 'luong', 'fast-text', 'entity-network')
    return list(supported)
def multinomial():
    """
    Load multinomial toxic model.
    Returns
    -------
    TOXIC : malaya._models._sklearn_model.TOXIC class
    """
    paths = PATH_TOXIC['multinomial']
    # Download the artifacts if they are not cached locally yet.
    check_file(paths, S3_PATH_TOXIC['multinomial'])
    with open(paths['model'], 'rb') as fopen:
        classifier = pickle.load(fopen)
    with open(paths['vector'], 'rb') as fopen:
        vectorizer = pickle.load(fopen)
    return TOXIC(classifier, vectorizer)
def logistic():
    """
    Load logistic toxic model.
    Returns
    -------
    TOXIC : malaya._models._sklearn_model.TOXIC class
    """
    paths = PATH_TOXIC['logistic']
    # Download the artifacts if they are not cached locally yet.
    check_file(paths, S3_PATH_TOXIC['logistic'])
    with open(paths['model'], 'rb') as fopen:
        classifier = pickle.load(fopen)
    with open(paths['vector'], 'rb') as fopen:
        vectorizer = pickle.load(fopen)
    return TOXIC(classifier, vectorizer)
def deep_model(model = 'luong'):
    """
    Load deep learning toxicity analysis model.
    Parameters
    ----------
    model : str, optional (default='luong')
        Model architecture supported. Allowed values:
        * ``'fast-text'`` - Fast-text architecture, embedded and logits layers only
        * ``'hierarchical'`` - LSTM with hierarchical attention architecture
        * ``'bahdanau'`` - LSTM with bahdanau attention architecture
        * ``'luong'`` - LSTM with luong attention architecture
        * ``'entity-network'`` - Recurrent Entity-Network architecture
    Returns
    -------
    TOXIC: malaya._models._tensorflow_model.SIGMOID class
    """
    assert isinstance(model, str), 'model must be a string'
    # Model names are case-insensitive.
    model = model.lower()
    if model == 'fast-text':
        check_file(PATH_TOXIC['fast-text'], S3_PATH_TOXIC['fast-text'])
        with open(PATH_TOXIC['fast-text']['setting'], 'r') as fopen:
            dictionary = json.load(fopen)['dictionary']
        # fast-text additionally needs the fitted n-gram transformer.
        with open(PATH_TOXIC['fast-text']['pickle'], 'rb') as fopen:
            ngram = pickle.load(fopen)
        g = load_graph(PATH_TOXIC['fast-text']['model'])
        return SIGMOID(
            g.get_tensor_by_name('import/Placeholder:0'),
            g.get_tensor_by_name('import/logits:0'),
            tf.InteractiveSession(graph = g),
            model,
            dictionary,
            ngram = ngram,
        )
    elif model == 'hierarchical':
        check_file(PATH_TOXIC['hierarchical'], S3_PATH_TOXIC['hierarchical'])
        with open(PATH_TOXIC['hierarchical']['setting'], 'r') as fopen:
            dictionary = json.load(fopen)['dictionary']
        g = load_graph(PATH_TOXIC['hierarchical']['model'])
        return SIGMOID(
            g.get_tensor_by_name('import/Placeholder:0'),
            g.get_tensor_by_name('import/logits:0'),
            tf.InteractiveSession(graph = g),
            model,
            dictionary,
            # attention weights tensor, exposed for visualization
            alphas = g.get_tensor_by_name('import/alphas:0'),
        )
    elif model in ['bahdanau', 'luong']:
        # bahdanau and luong share the same graph layout, only weights differ.
        check_file(PATH_TOXIC[model], S3_PATH_TOXIC[model])
        with open(PATH_TOXIC[model]['setting'], 'r') as fopen:
            dictionary = json.load(fopen)['dictionary']
        g = load_graph(PATH_TOXIC[model]['model'])
        return SIGMOID(
            g.get_tensor_by_name('import/Placeholder:0'),
            g.get_tensor_by_name('import/logits:0'),
            tf.InteractiveSession(graph = g),
            model,
            dictionary,
            alphas = g.get_tensor_by_name('import/alphas:0'),
        )
    elif model == 'entity-network':
        check_file(
            PATH_TOXIC['entity-network'], S3_PATH_TOXIC['entity-network']
        )
        # NOTE(review): unlike the other branches, the whole settings JSON is
        # used as the dictionary here (no ['dictionary'] key) — confirm intended.
        with open(PATH_TOXIC['entity-network']['setting'], 'r') as fopen:
            dictionary = json.load(fopen)
        g = load_graph(PATH_TOXIC['entity-network']['model'])
        return SIGMOID(
            g.get_tensor_by_name('import/Placeholder_question:0'),
            g.get_tensor_by_name('import/logits:0'),
            tf.InteractiveSession(graph = g),
            model,
            dictionary,
            dropout_keep_prob = g.get_tensor_by_name(
                'import/Placeholder_dropout_keep_prob:0'
            ),
            story = g.get_tensor_by_name('import/Placeholder_story:0'),
        )
    else:
        raise Exception(
            'model sentiment not supported, please check supported models from malaya.toxic.available_deep_model()'
        )
|
from flask import jsonify
from app.server import app
from app.models import DataValidationError
from flask_api import status
BAD_REQUEST_ERROR = 'Bad Request.'
METHOD_NOT_ALLOWED_ERROR = 'Method Not Allowed'
NOT_FOUND_ERROR = 'Not Found.'
UNSUPPORTED_MEDIA_TYPE_ERROR = 'Unsupported media type'
INTERNAL_SERVER_ERROR = 'Internal Server Error'
######################################################################
# Error Handlers
######################################################################
@app.errorhandler(DataValidationError)
def request_validation_error(error):
    """ Handles all data validation issues from the model """
    app.logger.error(error.message)
    body = jsonify(status=status.HTTP_400_BAD_REQUEST,
                   error=BAD_REQUEST_ERROR,
                   message=error.message)
    return body, status.HTTP_400_BAD_REQUEST
@app.errorhandler(status.HTTP_400_BAD_REQUEST)
def bad_request(error):
    """ Handles requests that have bad or malformed data """
    app.logger.error(str(error))
    body = jsonify(status=status.HTTP_400_BAD_REQUEST,
                   error=BAD_REQUEST_ERROR,
                   message=error.description)
    return body, status.HTTP_400_BAD_REQUEST
@app.errorhandler(status.HTTP_404_NOT_FOUND)
def not_found(error):
    """ Handles product information that cannot be found """
    # Fall back to str(error) when the exception carries no usable .message.
    message = error.message or str(error)
    app.logger.error(message)
    # Bug fix: the response previously used error.message directly, ignoring
    # the fallback computed above (inconsistent with the sibling handlers).
    return jsonify(status=status.HTTP_404_NOT_FOUND, error=NOT_FOUND_ERROR,
                   message=message), status.HTTP_404_NOT_FOUND
@app.errorhandler(status.HTTP_415_UNSUPPORTED_MEDIA_TYPE)
def mediatype_not_supported(error):
    """ Handles unsuppoted media requests with 415_UNSUPPORTED_MEDIA_TYPE """
    # Prefer the exception's own message, fall back to its string form.
    message = error.message or str(error)
    app.logger.error(message)
    body = jsonify(status=status.HTTP_415_UNSUPPORTED_MEDIA_TYPE,
                   error=UNSUPPORTED_MEDIA_TYPE_ERROR,
                   message=message)
    return body, status.HTTP_415_UNSUPPORTED_MEDIA_TYPE
@app.errorhandler(status.HTTP_500_INTERNAL_SERVER_ERROR)
def internal_server_error(error):
    """ Handles unexpected server error with 500_SERVER_ERROR """
    # Prefer the exception's own message, fall back to its string form.
    message = error.message or str(error)
    app.logger.error(message)
    body = jsonify(status=status.HTTP_500_INTERNAL_SERVER_ERROR,
                   error=INTERNAL_SERVER_ERROR,
                   message=message)
    return body, status.HTTP_500_INTERNAL_SERVER_ERROR
|
# -*- coding: utf-8 -*-
from yapsy.PluginManager import PluginManager
from yapsy.IPlugin import IPlugin
class Help(IPlugin):
    """Chat plugin: with an argument, yields that plugin's description;
    without one, yields a comma-separated list of all plugin names."""

    def execute(self, channel, username, command):
        manager = PluginManager()
        manager.setPluginPlaces(["plugins"])
        manager.collectPlugins()
        if command:
            # A specific plugin was named: report its description.
            yield channel, (manager.getPluginByName(command).description)
        else:
            names = [plugin.name for plugin in manager.getAllPlugins()]
            yield channel, (', '.join(names))
|
# http://codeforces.com/contest/282/problem/A
# Read n statements; each either increments or decrements X.
n = int(input())
statements = [input() for _ in range(n)]
x = 0
for stmt in statements:
    # '++X'/'X++' add one; anything else ('--X'/'X--') subtracts one.
    x += 1 if stmt in ('X++', '++X') else -1
print(x)
|
import numpy as np
import pandas as pd
import sys
import csv
import time
from PIL import Image
from sklearn.model_selection import train_test_split
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
import torchvision.transforms as transforms
# Pick the GPU when available and fix the RNG seed so runs are reproducible.
use_cuda = torch.cuda.is_available()
torch.manual_seed(123)
device = torch.device("cuda" if use_cuda else "cpu")
print('Device used:', device)
class data(Dataset):
    """Dataset over flat pixel rows and labels.

    Each row of x is reshaped to a 48x48 image (assumes 2304 pixels per
    row) and wrapped as a PIL image so torchvision transforms apply.
    """

    def __init__(self, x, y, transform=None):
        self.x = x
        self.y = y
        self.transform = transform
        self.len = x.shape[0]

    def __getitem__(self, index):
        img = Image.fromarray(self.x[index].reshape(48, 48))
        if self.transform is not None:
            img = self.transform(img)
        return img, self.y[index]

    def __len__(self):
        return self.len
class Net(nn.Module):
    """MobileNet-style CNN classifying 1x48x48 inputs into 7 classes."""

    def __init__(self):
        super(Net, self).__init__()

        def std_conv(c_in, c_out, stride):
            # Standard 3x3 convolution + batch-norm + ReLU.
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, 3, stride, 1, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            )

        def dw_conv(c_in, c_out, stride):
            # Depthwise 3x3 convolution followed by a pointwise 1x1 projection.
            return nn.Sequential(
                nn.Conv2d(c_in, c_in, 3, stride, 1, groups=c_in, bias=False),
                nn.BatchNorm2d(c_in),
                nn.ReLU(inplace=True),
                nn.Conv2d(c_in, c_out, 1, 1, 0, bias=False),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            )

        # 48x48 -> 24x24 (maxpool) -> 12x12 -> 6x6 -> 1x1 (avgpool).
        self.model = nn.Sequential(
            std_conv(1, 32, 1),
            std_conv(32, 32, 1),
            nn.MaxPool2d(2),
            nn.Dropout(0.25),
            dw_conv(32, 64, 1),
            dw_conv(64, 64, 1),
            dw_conv(64, 80, 2),
            dw_conv(80, 80, 1),
            dw_conv(80, 80, 2),
            dw_conv(80, 80, 1),
            nn.AvgPool2d(6),
            nn.Dropout(0.5),
        )
        self.fc = nn.Sequential(
            nn.Linear(80, 7),
        )

    def forward(self, x):
        features = self.model(x)
        flat = features.view(-1, 80)
        return self.fc(flat)
def load_data(train_path):
    """Load a FER-style CSV (label column, space-separated pixel column)
    and return a 90/10 train/validation split of (n, 1, 48, 48) images."""
    frame = pd.read_csv(train_path, header=0)
    rows = np.array(frame.values)
    labels = rows[:, 0]
    pixel_strings = rows[:, 1]
    n = len(pixel_strings)
    images = np.zeros((n, 48 * 48))
    for idx in range(n):
        # Each cell is a whitespace-separated string of 2304 pixel values.
        images[idx] = np.array([int(p) for p in pixel_strings[idx].split()])
    images = images.reshape(n, 1, 48, 48)
    x_train, x_valid, y_train, y_valid = train_test_split(
        images, labels, test_size=0.1, random_state=0)
    return x_train, y_train, x_valid, y_valid
# Path to the training CSV comes from the first command-line argument.
train_path = sys.argv[1]
x_train, y_train, x_valid, y_valid = load_data(train_path)
# Augment the training set with random flips/rotations; validation gets none.
train_set = data(x_train, y_train,
            transform = transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.RandomRotation(15),
            transforms.ToTensor()
            ])
        )
valid_set = data(x_valid, y_valid,
            transform = transforms.Compose([
            transforms.ToTensor()
            ])
        )
# Training hyper-parameters.
batch_size = 128
lr = 0.0005
n_epoch = 150
train_loader = DataLoader(train_set, batch_size=batch_size, shuffle=True, num_workers=1)
valid_loader = DataLoader(valid_set, batch_size=batch_size, shuffle=False, num_workers=1)
model = Net().to(device)
# print(model)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=lr)
best_acc = 0.0
for epoch in range(n_epoch):
    epoch_start_time = time.time()
    train_acc = 0.0
    train_loss = 0.0
    val_acc = 0.0
    val_loss = 0.0
    model.train()
    for i, data in enumerate(train_loader):
        optimizer.zero_grad()
        output = model(data[0].to(device))
        loss = criterion(output, data[1].to(device))
        loss.backward()
        optimizer.step()
        # Accuracy is accumulated as a raw hit count, normalized after the loop.
        train_acc += np.sum(np.argmax(output.cpu().data.numpy(), axis=1) == data[1].numpy())
        train_loss += loss.item()
    model.eval()
    for i, data in enumerate(valid_loader):
        pred = model(data[0].to(device))
        loss = criterion(pred, data[1].to(device))
        val_acc += np.sum(np.argmax(pred.cpu().data.numpy(), axis=1) == data[1].numpy())
        val_loss += loss.item()
    # NOTE(review): losses are sums of per-batch means divided by the sample
    # count, so the reported loss is scaled by ~1/batch_size — confirm intended.
    val_acc = val_acc / len(valid_set)
    train_acc = train_acc / len(train_set)
    val_loss = val_loss / len(valid_set)
    train_loss = train_loss / len(train_set)
    print('[%03d/%03d] %2.2f sec(s) Train Acc: %3.6f Loss: %3.6f | Val Acc: %3.6f Loss: %3.6f'
        % (epoch + 1, n_epoch, time.time()-epoch_start_time, train_acc, train_loss, val_acc, val_loss))
    # Keep only the checkpoint with the best validation accuracy so far.
    if (val_acc > best_acc):
        torch.save(model.state_dict(), 'my_model.pth')
        best_acc = val_acc
        print ('Model Saved!')
|
# Practice script exploring basic Python types and operators.
print('You are in Bananas branch!')
# #int
k = 5
# #string
t = 'Ana'
# #float
f = 5.67
print('You are in Coconuts Branch!')
#int-float
a = 2
b = 54
c = 13.5
print(a+b+c)
x = 13
z = 23.5
y = x + z
# Modulo with a float operand yields a float remainder.
print (y % x)
a = 2.5
p = 1
q = a + 3
# Exponentiation binds tighter than floor division.
print(q ** 13 //a)
#string
a = "Hello"
b = "Coconut"
print(a + b)
reddish = 1
lettuce = 3
apple = 2.5
sauce = 0.25
# Floor division applies only to sauce // 4 (precedence over +).
salad = reddish + lettuce + apple + sauce // 4
print(salad + 1.5)
# test = int(3) + 1
test = int('3') + z
print("Test: ", test)
# String slicing/indexing on a Cyrillic sentence.
text = "В реальной жизни мы совершаем различные действия над окружающими нас предметами"
print(len(text))
print(text[:])
print(text[34])
print(text[3] + text[45] + text[62] + text[-1])
# Reversed copy via a negative slice step.
print(text[::-1])
text = "В реальной жизни мы совершаем различные действия над окружающими нас предметами"
print(text.title())
import torch.utils.data
import random
import scipy.misc
import numpy as np
import os
import math
import utils
from tqdm import tqdm
from point_cloud import Depth2BEV, get_visibility_grid
import time
import pandas as pd
class SubsetSampler(torch.utils.data.sampler.Sampler):
    """Sampler that yields a fixed list of indices in their given order
    (unlike SubsetRandomSampler, no shuffling is applied)."""

    def __init__(self, indices):
        self.indices = indices

    def __iter__(self):
        return iter(self.indices)

    def __len__(self):
        return len(self.indices)
class IntPhys(torch.utils.data.Dataset):
    """Dataset over IntPhys videos: converts per-frame depth maps to
    bird's-eye-view (BEV) / front-view (FV) grids via Depth2BEV and builds
    inputs/targets for detection-style training (crops, PIXOR-style maps,
    or prior maps), selected by opt.input / opt.target."""
    def __init__(self, opt, split):
        # opt: experiment options namespace; split: e.g. 'train'/'val'/'test*'.
        self.opt = opt
        self.index = 0
        self.test = 'test' in split
        self.depth2bev = Depth2BEV(opt)
        # Half the crop side; crops are (2*crop_sz) x (2*crop_sz).
        self.crop_sz = int(opt.crop_sz/2)
        self.compute_visibility_grid = opt.visibility_grid
        if not opt.use_occluded:
            self.annotations = 'annotations'
        else:
            self.annotations = 'annotations-occluded'
        if opt.list:
            # Explicit list of video paths from <list>/<split>.npy.
            self.file = os.path.join(opt.list, split + '.npy')
            self.paths = np.load(self.file).tolist()
            if opt.remove_images_with_no_objects:
                self.no_object_indices = \
                    np.load(os.path.join(opt.list,
                        split + '_no_object_indices.npy')).tolist()
            else:
                self.no_object_indices = []
            count = min(opt.count, len(self.paths)) * self.opt.m
            #count = min(opt.count, len(self.paths))
        else:
            # Otherwise video paths are generated from a printf-style pattern.
            # NOTE(review): self.no_object_indices is not set on this branch,
            # so the len() check below would raise — confirm opt.list is
            # always given in practice.
            self.pattern = opt.pattern
            count = opt.count * opt.m
        count = count * 0.9 if 'train' in split else count * 0.1
        count = int(count)
        self.i0 = 1 if 'train' in split else int(0.9 * opt.count + 1)
        # Create list of valid indices for subset sampler
        start = 100 if 'train' in split else 0
        self.indices = list(range(start,count))
        if len(self.no_object_indices) > 1:
            self.indices = list(set(self.indices) - set(self.no_object_indices))
        # Map global sample index -> position within self.indices.
        self.inverted_indices = {}
        for idx, el in enumerate(self.indices):
            self.inverted_indices[el] = idx
        self.count = len(self.indices)
        # Truncate to a whole number of batches.
        self.count = self.count - (self.count % opt.bsz)
        vars(opt)['n_sample_%s' %split] = self.count
        vars(opt)['nbatch_%s' %split] = int(self.count / opt.bsz)
        print('n_sample_%s: %s' %(split, self.count))
        # Per-sample state shared between the input and target loaders.
        self.last_offsets = None
        self.last_flip = False
        self.manhattan_dist = 3
        self.flip_prob = opt.random_flip
        if 'train' in split:
            det_file = opt.train_detections_file
        elif 'val' in split:
            det_file = opt.val_detections_file
        else:
            det_file = ""
        if det_file != "":
            self.detections_file = pd.read_csv(det_file, index_col=None)
    def __getitem__(self, index):
        """Return (input, target) for sample `index`; frame and video are
        derived from index via opt.m frames per video."""
        video_idx = math.floor(index / self.opt.m)
        video_path = self._getpath(video_idx)
        frame_idx = index % self.opt.m
        def extract_random_image_patch(obj_array, bev):
            # Crop a (2*crop_sz)^2 patch around a randomly chosen object
            # location, jittered by a few pixels and zero-padded at borders.
            c, h, w = bev.shape
            obj = obj_array[np.random.randint(0, len(obj_array))]
            i = obj[0]; j = obj[1]
            # randomly shift
            i += np.random.randint(-3, 3)
            j += np.random.randint(-3, 3)
            h_min = max(0, j-self.crop_sz); h_max = min(h-1, j+self.crop_sz)
            w_min = max(0, i-self.crop_sz); w_max = min(w-1, i+self.crop_sz)
            crop = bev[:, h_min:h_max, w_min:w_max]
            # if crop is not 48x48, pad with zeros
            crop_c, crop_h, crop_w = crop.shape
            final_crop = np.zeros((crop_c, self.crop_sz*2, self.crop_sz*2), dtype=np.uint8)
            padding = []
            for chw in [crop_h, crop_w]:
                if chw < self.crop_sz * 2:
                    short = (self.crop_sz * 2) - chw
                    if short % 2 == 0:
                        pad_width = (int(short/2), int(short/2))
                    else:
                        l = np.floor(short/2)
                        pad_width = (int(l+1), int(l))
                    padding.append(pad_width)
                else:
                    padding.append((0,0))
            for r in range(crop_c):
                final_crop[r] = np.pad(crop[r], padding, 'constant', constant_values=0)
            # randomly flip
            if np.random.uniform() < self.flip_prob:
                for r in range(crop_c):
                    final_crop[r] = np.flipud(final_crop[r]).copy()
            return final_crop
        def load_BEV_crop(idx, label):
            # Build a patch-classification sample: positive crops around
            # objects on even frame ids, negative crops otherwise.
            idx += 1
            # if idx is even it's positive example
            # if odd, negative example
            if label:
                if idx % 2 == 1:
                    x = {'binary_target': np.array([0.])}
                    return {'BEV': x, 'FV': x}
                else:
                    x = {'binary_target': np.array([1.])}
                    return {'BEV': x, 'FV': x}
            else:
                with open('%s/%s/%03d.txt' %(video_path, self.annotations, idx), 'r') as f:
                    result = {}
                    # this cast to np.float32 is very important...
                    depth_img = np.float32(scipy.misc.imread(
                        '%s/depth/depth_%03d.png' %(video_path, idx)))
                    # uses the fixed depth, not the actual max depth
                    # need to rescale objects
                    pc = self.depth2bev.depth_2_point_cloud(depth_img)
                    # Note that this BEV map is already in PyTorch CHW format
                    bev, offsets = self.depth2bev.point_cloud_2_view(pc, view='BEV')
                    self.last_offsets = offsets
                    fv, _ = self.depth2bev.point_cloud_2_view(pc, view='FV')
                    result['FV_full'] = fv
                    gt_objects = []
                    # grab occluders too
                    gt_occluders = []
                    _, _, occluders = self.depth2bev.parse_status('%s/status.json' %(video_path))
                    frame = occluders[idx-1]
                    for vidx, (v, grid) in enumerate(zip(['BEV', 'FV'], [bev, fv])):
                        f.seek(0)
                        max_depth = float(f.readline())
                        gt_objects.append([])
                        gt_occluders.append([])
                        c, h, w = grid.shape
                        for line in f:
                            pos = np.array(list(map(float, line.split(" "))))
                            rescaled_pos = self.depth2bev.backproject_and_rescale(
                                pos, self.depth2bev.fixed_depth / max_depth)
                            for r in range(3): rescaled_pos[r] += self.last_offsets[r]
                            # pixel location of the object
                            i, j = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=1, view=v)
                            # if object is in view
                            if 0 <= i < w and 0 <= j < h:
                                gt_objects[vidx].append((i,j))
                        for pos in frame:
                            rescaled_pos = self.depth2bev.backproject_and_rescale(
                                pos, self.depth2bev.fixed_depth / max_depth)
                            for r in range(3): rescaled_pos[r] += self.last_offsets[r]
                            # pixel location of the object
                            i, j = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=1, view=v)
                            # if object is in view
                            if 0 <= i < w and 0 <= j < h:
                                gt_occluders[vidx].append((i,j))
                        # if positive
                        if idx % 2 == 0 and len(gt_objects[vidx]) > 0:
                            # extract a 48 x 48 crop around a random item in the video
                            final_crop = extract_random_image_patch(gt_objects[vidx], grid)
                        # negative examples
                        elif idx % 2 == 1 or (idx % 2 == 0 and len(gt_objects[vidx]) == 0):
                            # If there is an occluder, randomly sample patch from it, otherwise use a random patch
                            if len(gt_occluders[vidx]) > 0:
                                final_crop = extract_random_image_patch(gt_occluders[vidx], grid)
                            else:
                                # Sample a 48x48 patch from the image that doesn't overlap gt patches
                                done = False
                                while not done:
                                    i = np.random.randint(self.crop_sz, w-self.crop_sz)
                                    j = np.random.randint(self.crop_sz, h-self.crop_sz)
                                    if len(gt_objects[vidx]) == 0:
                                        done = True
                                    else:
                                        for obj in gt_objects[vidx]:
                                            gt_i = obj[0]; gt_j = obj[1]
                                            if abs(gt_i - i) > self.crop_sz and abs(gt_j - j) > self.crop_sz:
                                                done = True
                                h_min = max(0, j-self.crop_sz); h_max = min(h-1, j+self.crop_sz)
                                w_min = max(0, i-self.crop_sz); w_max = min(w-1, i+self.crop_sz)
                                crop = grid[:, h_min:h_max, w_min:w_max]
                                crop_c, crop_h, crop_w = crop.shape
                                # randomly flip
                                if np.random.uniform() < self.flip_prob:
                                    for r in range(crop_c):
                                        crop[r] = np.flipud(crop[r]).copy()
                                final_crop = crop
                        result[v] = final_crop
                    return result
        def load_BEV(idx, label):
            # Build PIXOR-style dense maps: input BEV/FV grids (label=False)
            # or per-cell binary + regression targets (label=True).
            # The output BEV prediction maps is downsampled
            # by some amount
            pixor_downsample = 4
            idx += 1
            if not label:
                # this cast to np.float32 is very important...
                depth_img = np.float32(scipy.misc.imread(
                    '%s/depth/depth_%03d.png' %(video_path, idx)))
                # uses the fixed depth, not the actual max depth
                # need to rescale objects
                pc = self.depth2bev.depth_2_point_cloud(depth_img)
                # Note that this BEV map is already in PyTorch CHW format
                bev, offsets = self.depth2bev.point_cloud_2_view(pc, view='BEV')
                if self.compute_visibility_grid:
                    # 10 is the scaling ratio from original world coords to
                    # BEV dimensions
                    cam_x = int(np.floor(offsets[0] / 10))
                    cam_y = int(np.floor(offsets[1] / 10))
                    vg = get_visibility_grid(bev, (cam_x, cam_y))
                    # AVERAGING
                    vg = vg[:15,:,:].mean(axis=0, keepdims=True)
                self.last_offsets = offsets
                fv, _ = self.depth2bev.point_cloud_2_view(pc, view='FV')
                # Decide the flip once here; load_BEV(label=True) reuses it so
                # inputs and targets stay consistent.
                if np.random.uniform() < self.flip_prob:
                    self.last_flip = True
                else:
                    self.last_flip = False
                if self.last_flip and not self.test:
                    for r in range(bev.shape[0]):
                        bev[r] = np.flipud(bev[r]).copy()
                if self.last_flip or self.test:
                    for r in range(fv.shape[0]):
                        fv[r] = np.flipud(fv[r]).copy()
                if self.test:
                    if self.compute_visibility_grid:
                        f = '%s/%s' %(video_path, idx)
                        return {'BEV': bev, 'FV': fv, 'point_cloud': pc, 'VG': vg, 'cam': [cam_x, cam_y], 'frame': f}
                    else:
                        return {'BEV': bev, 'FV': fv, 'point_cloud': pc}
                else:
                    return {'BEV': bev, 'FV': fv}
            else:
                with open('%s/%s/%03d.txt' %(video_path, self.annotations, idx), 'r') as f:
                    # BEV
                    # 87
                    grid_x = int(np.ceil(self.depth2bev.bev_x_dim / pixor_downsample))
                    # 63
                    grid_y = int(np.ceil(self.depth2bev.bev_y_dim / pixor_downsample))
                    bev_grid_dims = [grid_x, grid_y]
                    # FV
                    # 87
                    grid_x = int(np.ceil(self.depth2bev.fv_x_dim / pixor_downsample))
                    # 63
                    grid_y = int(np.ceil(self.depth2bev.fv_y_dim / pixor_downsample))
                    fv_grid_dims = [grid_x, grid_y]
                    data = {}
                    gt_objects = []
                    gt_objects_px = []
                    # grab occluders too
                    gt_occluders = []
                    _, _, occluders = self.depth2bev.parse_status('%s/status.json' %(video_path))
                    occluders = occluders[idx-1]
                    for view, grid_dims in zip(['BEV', 'FV'], [bev_grid_dims, fv_grid_dims]):
                        f.seek(0)
                        max_depth = float(f.readline())
                        data[view] = {}
                        grid_x = grid_dims[0]; grid_y = grid_dims[1]
                        # Use H x W for easy integration with PyTorch
                        binary_map = np.zeros((grid_y, grid_x))
                        #height_map = np.zeros((grid_y, grid_x))
                        regression_map = np.zeros((2, grid_y, grid_x))
                        if view == 'BEV':
                            for pos in occluders:
                                rescaled_pos = self.depth2bev.backproject_and_rescale(
                                    pos, self.depth2bev.fixed_depth / max_depth)
                                for r in range(3): rescaled_pos[r] += self.last_offsets[r]
                                # pixel location of the object
                                i, j = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=1, view='BEV')
                                # convert z to height channel
                                k = int(np.floor(rescaled_pos[2] / (self.depth2bev.pc_z_dim \
                                        / self.depth2bev.grid_height_channels)))
                                #_, k = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=1, view='FV')
                                gt_occluders.append((j,i,k))
                        for line in f:
                            if line == '\n':
                                continue
                            pos = np.array(list(map(float, line.split(" "))))
                            rescaled_pos = self.depth2bev.backproject_and_rescale(
                                pos, self.depth2bev.fixed_depth / max_depth)
                            for r in range(3): rescaled_pos[r] += self.last_offsets[r]
                            if view == 'BEV':
                                i_, j_ = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=1, view='BEV')
                                k_ = int(np.floor(rescaled_pos[2] / (self.depth2bev.pc_z_dim \
                                        / self.depth2bev.grid_height_channels)))
                                #_, k_ = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=1, view='FV')
                                if 0 <= j_ < self.depth2bev.bev_y_dim and 0 <= i_ < self.depth2bev.bev_x_dim:
                                    gt_objects.append(rescaled_pos)
                                    gt_objects_px.append((j_,i_,k_))
                            # pixel location of the object
                            i, j = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=pixor_downsample, view=view)
                            # set pixels in ~120 pixel radius to 1 (120 / 10 / 4 ~ 3)
                            if 0 <= i < grid_x and 0 <= j < grid_y: # check k
                                c = (i, j)
                                px = utils.get_nearby_pixels(c, self.manhattan_dist, (grid_y, grid_x))
                                for p in px: # positives
                                    binary_map[p[1], p[0]] = 1
                                    # compute dx, dy for each grid cell in the set of positives
                                    if view == 'BEV':
                                        x, y = self.depth2bev.grid_cell_2_point(p[0],
                                                p[1], scale=pixor_downsample, view=view)
                                        dx = rescaled_pos[0] - x; dy = rescaled_pos[1] - y
                                        for r,d in enumerate([dx, dy]):
                                            # normalize to N(0,1)
                                            d = (d - self.depth2bev.regression_stats[view][r][0]) \
                                                    / self.depth2bev.regression_stats[view][r][1]
                                            regression_map[r, p[1], p[0]] = d
                                    elif view == 'FV':
                                        y, z = self.depth2bev.grid_cell_2_point(p[0], p[1],
                                                scale=pixor_downsample, view=view)
                                        dy = rescaled_pos[1] - y; dz = rescaled_pos[2] - z
                                        for r,d in enumerate([dy, dz]):
                                            # normalize to N(0,1)
                                            d = (d - self.depth2bev.regression_stats[view][r][0]) \
                                                    / self.depth2bev.regression_stats[view][r][1]
                                            regression_map[r, p[1], p[0]] = d
                        if self.last_flip:
                            binary_map = np.flipud(binary_map).copy()
                            for r in range(2):
                                regression_map[r] = np.flipud(regression_map[r]).copy()
                        data[view]['binary_target'] = binary_map
                        data[view]['regression_target'] = regression_map
                    data['objects_px'] = {'balls': gt_objects_px, 'walls': gt_occluders}
                    data['objects'] = gt_objects
                    return data
        def load_BEV_prior(idx, label):
            # Build samples for the prior model: BEV + visibility grid as
            # input, occluded-object coordinates + detections as target.
            # The output BEV prediction maps is downsampled
            # by some amount
            pixor_downsample = 4
            idx += 1
            if not label:
                # this cast to np.float32 is very important...
                depth_img = np.float32(scipy.misc.imread(
                    '%s/depth/depth_%03d.png' %(video_path, idx)))
                # uses the fixed depth, not the actual max depth
                # need to rescale objects
                pc = self.depth2bev.depth_2_point_cloud(depth_img)
                # Note that this BEV map is already in PyTorch CHW format
                bev, offsets = self.depth2bev.point_cloud_2_view(pc, view='BEV')
                # 10 is the scaling ratio from original world coords to
                # BEV dimensions
                cam_x = int(np.floor(offsets[0] / 10))
                cam_y = int(np.floor(offsets[1] / 10))
                vg = get_visibility_grid(bev, (cam_x, cam_y))
                # AVERAGING
                vg = vg[:15,:,:].mean(axis=0, keepdims=True)
                self.last_offsets = offsets
                if np.random.uniform() < self.flip_prob:
                    self.last_flip = True
                else:
                    self.last_flip = False
                if self.last_flip and not self.test:
                    # NOTE(review): vg[r] is flipped inside the bev loop, so
                    # only the first bev.shape[0] visibility rows are flipped
                    # — confirm this is intended.
                    for r in range(bev.shape[0]):
                        bev[r] = np.flipud(bev[r]).copy()
                        vg[r] = np.flipud(vg[r]).copy()
                return {'BEV': bev, 'VG': vg, 'cam': [cam_x, cam_y]}
            else:
                data = {}
                # collect coords of fully occluded objects in 4x downsampled pixel coords
                with open('%s/annotations-occluded/%03d.txt' %(video_path, idx), 'r') as f:
                    max_depth = float(f.readline())
                    # Up to 3 occluded objects; -1 marks an empty slot.
                    data['occluded'] = -np.ones((3,3))
                    obj_idx=0
                    for line in f:
                        if line == '\n':
                            continue
                        pos = np.array(list(map(float, line.split(" "))))
                        rescaled_pos = self.depth2bev.backproject_and_rescale(
                            pos, self.depth2bev.fixed_depth / max_depth)
                        for r in range(3): rescaled_pos[r] += self.last_offsets[r]
                        i_, j_ = self.depth2bev.point_2_grid_cell(rescaled_pos, scale=pixor_downsample, view='BEV')
                        k_ = int(np.floor(rescaled_pos[2] / (self.depth2bev.pc_z_dim \
                                / self.depth2bev.grid_height_channels)))
                        if 0 <= i_ < int(round(self.depth2bev.bev_x_dim/pixor_downsample)) and \
                                0 <= j_ < int(round(self.depth2bev.bev_y_dim/pixor_downsample)):
                            data['occluded'][obj_idx,0] = k_
                            data['occluded'][obj_idx,1] = j_
                            data['occluded'][obj_idx,2] = i_
                            obj_idx += 1
                # collect detections and scores
                data['detections'] = -np.ones((3,4))
                indx = self.inverted_indices[index]
                dets = self.detections_file[
                    self.detections_file['idx'] == indx].values.astype(np.float32)[:,1:]
                # convert detections to pixel values
                data['detections'][:len(dets),0] = np.floor(dets[:,0] / (self.depth2bev.pc_z_dim / self.depth2bev.grid_height_channels))
                data['detections'][:len(dets),1] = np.floor(dets[:,1] / (self.depth2bev.grid_y_res * pixor_downsample))
                data['detections'][:len(dets),2] = np.floor(dets[:,2] / (self.depth2bev.grid_x_res * pixor_downsample))
                data['detections'][:len(dets),3] = dets[:,3]
                return data
        def load(x, nc, start, seq, interp, c):
            # Load and resize a sequence of frames of modality x; frame codes
            # 'z'/'x'/'y' inject random frames for contrastive-style targets.
            out = []
            for j,f in enumerate(seq):
                if f == 'z':
                    f = 'x' if random.random() < self.opt.px else 'y'
                if f == 'y':
                    # Random frame from a different, randomly chosen video.
                    ri = random.randint(0, len(self.paths) - 1)
                    v = self.paths[ri].decode('UTF-8')
                else:
                    v = os.path.join(video_path, c)
                if f == 'x' or f == 'y':
                    # Random frame offset within the video.
                    f = random.randint(1, self.opt.n_frames) - start
                    assert not self.test
                img = scipy.misc.imread(
                    '%s/%s/%s_%03d.png' %(v, x, x, start + f),
                    #mode='RGB'
                )
                out.append(scipy.misc.imresize(
                    img,
                    (self.opt.frame_height, self.opt.frame_width),
                    interp))
            return np.array(out)
        def load_diff(x, nc, start, seq, interp, c):
            # Optionally return the temporal difference between two loads
            # separated by opt.residual frames.
            if self.opt.residual == 0:
                return load(x, nc, start, seq, interp, c)
            else:
                out0 = load(x, nc, start, seq, interp, c)
                out1 = load(x, nc, start + self.opt.residual, seq, interp, c)
                return out1 - out0
        def make_output(x, start, seq, c='.'):
            # Dispatch on the configured modality name to the loader above.
            if x == 'edge':
                raise NotImplementedError
            elif x == 'depth':
                return load_diff('depth', 1, start, seq, 'bilinear', c)
            elif x == 'bev-depth':
                return load_BEV(start, label=False)
            elif x == 'bev-label':
                return load_BEV(start, label=True)
            elif x == 'bev-crop':
                return load_BEV_crop(start, label=False)
            elif x == 'bev-crop-label':
                return load_BEV_crop(start, label=True)
            elif x == 'bev-prior':
                return load_BEV_prior(start, label=False)
            elif x == 'bev-prior-label':
                return load_BEV_prior(start, label=True)
            elif x == 'mask':
                mask_value = utils.get_mask_index(
                    os.path.join(video_path, str(c), 'status.json'),
                    self.opt.mask_object
                )
                raw_mask = load_diff('mask', 1, start, seq, 'nearest', c)
                mask = raw_mask.astype(int)
                # Channel 0 is "background" (everything not matched below);
                # one extra channel per requested object type.
                out = [np.ones(mask.shape, dtype=bool)]
                for o in self.opt.mask_object:
                    m = np.zeros(mask.shape, dtype=bool)
                    for v in mask_value[o]:
                        m[mask == v] = True
                        out[0][mask == v] = False
                    out.append(m)
                return np.transpose(np.array(out, dtype=int), (1, 0, 2, 3))
            elif x == 'scene':
                out = load_diff('scene', self.opt.num_channels, start, seq,
                        'bilinear', c).astype(float) / 255
                return np.transpose(out, (0, 3, 1, 2))
            else:
                print('Unknown opt.input or opt.target: ' + x)
                return None
        if self.test:
            # TODO
            #input_, target = [], []
            #for c in range(1, 5):
            #    input_.append(make_output(
            #        self.opt.input, frame_idx, self.opt.input_seq, str(c)
            #    ))
            #    target.append(make_output(
            #        self.opt.target, frame_idx, self.opt.target_seq, str(c)
            #    ))
            #input_ = np.array(input_)
            #target = np.array(target)
            input_ = make_output(
                self.opt.input, frame_idx, self.opt.input_seq
            )
            target = make_output(
                self.opt.target, frame_idx, self.opt.target_seq
            )
        else:
            input_ = make_output(
                self.opt.input, frame_idx, self.opt.input_seq
            )
            target = make_output(
                self.opt.target, frame_idx, self.opt.target_seq
            )
        #out.video_path = video_path
        return input_, target
    def __len__(self):
        """Number of usable samples (a whole multiple of the batch size)."""
        return self.count
    def _getpath(self, video_idx):
        """Resolve a video index to its directory path, from the explicit
        path list when present, else from the printf-style pattern."""
        if hasattr(self, 'paths'):
            try:
                video_path = self.paths[video_idx].decode('UTF-8')
            except AttributeError:
                video_path = self.paths[video_idx]
        else:
            video_path = self.pattern %(self.i0 + video_idx)
        return video_path
|
from nltk.corpus import wordnet as wn
import os

# Emit T-SQL INSERT statements for every WordNet synset, its lemmas,
# antonyms, examples and hypernyms.  Updated from Python 2 to Python 3
# (print function) and to the NLTK >= 3.0 API, where Synset.name /
# definition / pos, Lemma.name and Lemma.synset are methods, not attributes.

def _esc(text):
    """Escape single quotes for embedding in a T-SQL string literal."""
    return text.replace("'", "''")

for s in wn.all_synsets():
    print("insert into WN.Synsets (ID,Definition,POS) values ('" + s.name() + "','" + _esc(s.definition()) + "','" + s.pos() + "')")
    print("GO")
    for l in s.lemmas():
        print("insert into WN.Lemmas (ID,Lemma) values ('" + s.name() + "','" + _esc(l.name()).replace("_", " ") + "')")
        print("GO")
        for a in l.antonyms():
            print("insert into WN.Antonyms (ID,Lemma,AntonymID,AntonymLemma) values ('" + s.name() + "','" + _esc(l.name()).replace("_", " ") + "','" + a.synset().name() + "','" + _esc(a.name()).replace("_", " ") + "')")
            print("GO")
    for e in s.examples():
        print("insert into WN.Examples (ID,Example) values ('" + s.name() + "','" + _esc(e) + "')")
        print("GO")
    for h in s.hypernyms():
        print("insert into WN.Hypernyms (ID,HypernymID) values ('" + s.name() + "','" + h.name() + "')")
        print("GO")
|
class Home:
    """Holds smart-home state: parking flag, five lights, door lock,
    temperature and remaining energy budget.

    Read access goes through properties; mutation goes through the
    explicit setX methods (interface preserved for existing callers).
    """

    def __init__(self):
        self.__parking = False
        self.__lights = [False] * 5
        self.__doorlock = False
        self.__temperature = 0.0
        self.__energy = 100

    @property
    def parking(self):
        return self.__parking

    def setParking(self, parking):
        self.__parking = parking

    @property
    def lights(self):
        return self.__lights

    def setLights(self, lights):
        self.__lights = lights

    @property
    def doorlock(self):
        return self.__doorlock

    def setDoorlock(self, doorlock):
        self.__doorlock = doorlock

    @property
    def temperature(self):
        return self.__temperature

    def setTemperature(self, temperature):
        self.__temperature = temperature

    @property
    def energy(self):
        return self.__energy

    def setEnergy(self, energy):
        self.__energy = energy
|
"""
Umweltbundesamt: Meeresumweltdatenbank (MUDAB)
Meeres-Monitoringdaten von Küstenbundesländern und Forschungseinrichtungen # noqa: E501
The version of the OpenAPI document: 1.0.0
Generated by: https://openapi-generator.tech
"""
import re # noqa: F401
import sys # noqa: F401
from deutschland.mudab.api_client import ApiClient, Endpoint as _Endpoint
from deutschland.mudab.model_utils import ( # noqa: F401
check_allowed_values,
check_validations,
date,
datetime,
file_type,
none_type,
validate_and_convert_types,
)
from deutschland.mudab.model.filter_request import FilterRequest
from deutschland.mudab.model.inline_response200 import InlineResponse200
from deutschland.mudab.model.inline_response2001 import InlineResponse2001
from deutschland.mudab.model.inline_response2002 import InlineResponse2002
from deutschland.mudab.model.inline_response2003 import InlineResponse2003
from deutschland.mudab.model.inline_response2004 import InlineResponse2004
from deutschland.mudab.model.inline_response2005 import InlineResponse2005
from deutschland.mudab.model.inline_response2006 import InlineResponse2006
from deutschland.mudab.model.inline_response2007 import InlineResponse2007
from deutschland.mudab.model.inline_response2008 import InlineResponse2008
class DefaultApi(object):
    """NOTE: This class is auto generated by OpenAPI Generator
    Ref: https://openapi-generator.tech
    Do not edit the class manually.
    """

    def __init__(self, api_client=None):
        """Build one pre-configured _Endpoint per MUDAB database view.

        Every endpoint is a POST taking an optional FilterRequest body and
        returning JSON; the endpoints differ only in path and response model.
        """
        if api_client is None:
            api_client = ApiClient()
        self.api_client = api_client
        # POST /STATION_SMALL -> InlineResponse2001 (measuring stations)
        self.list_mess_stationen_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2001,),
                "auth": [],
                "endpoint_path": "/STATION_SMALL",
                "operation_id": "list_mess_stationen",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /MV_PARAMETER -> InlineResponse2002 (parameters)
        self.list_parameter_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2002,),
                "auth": [],
                "endpoint_path": "/MV_PARAMETER",
                "operation_id": "list_parameter",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /MV_STATION_MSMNT -> InlineResponse2003 (measured values)
        self.list_parameter_values_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2003,),
                "auth": [],
                "endpoint_path": "/MV_STATION_MSMNT",
                "operation_id": "list_parameter_values",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /MV_PARAMETER_BIOLOGIE -> InlineResponse2004 (biology compartment)
        self.list_parameters_biologie_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2004,),
                "auth": [],
                "endpoint_path": "/MV_PARAMETER_BIOLOGIE",
                "operation_id": "list_parameters_biologie",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /MV_PARAMETER_BIOTA -> InlineResponse2005 (biota compartment)
        self.list_parameters_biota_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2005,),
                "auth": [],
                "endpoint_path": "/MV_PARAMETER_BIOTA",
                "operation_id": "list_parameters_biota",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /MV_PARAMETER_SEDIMENT -> InlineResponse2007 (sediment compartment)
        self.list_parameters_sediment_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2007,),
                "auth": [],
                "endpoint_path": "/MV_PARAMETER_SEDIMENT",
                "operation_id": "list_parameters_sediment",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /MV_PARAMETER_WASSER -> InlineResponse2006 (water compartment)
        self.list_parameters_wasser_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2006,),
                "auth": [],
                "endpoint_path": "/MV_PARAMETER_WASSER",
                "operation_id": "list_parameters_wasser",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /V_PLC_STATION -> InlineResponse2008 (HELCOM PLC stations)
        self.list_plc_stations_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse2008,),
                "auth": [],
                "endpoint_path": "/V_PLC_STATION",
                "operation_id": "list_plc_stations",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )
        # POST /PROJECTSTATION_SMALL -> InlineResponse200 (project stations)
        self.list_projekt_stationen_endpoint = _Endpoint(
            settings={
                "response_type": (InlineResponse200,),
                "auth": [],
                "endpoint_path": "/PROJECTSTATION_SMALL",
                "operation_id": "list_projekt_stationen",
                "http_method": "POST",
                "servers": None,
            },
            params_map={
                "all": [
                    "filter_request",
                ],
                "required": [],
                "nullable": [],
                "enum": [],
                "validation": [],
            },
            root_map={
                "validations": {},
                "allowed_values": {},
                "openapi_types": {
                    "filter_request": (FilterRequest,),
                },
                "attribute_map": {},
                "location_map": {
                    "filter_request": "body",
                },
                "collection_format_map": {},
            },
            headers_map={
                "accept": ["application/json"],
                "content_type": ["application/json"],
            },
            api_client=api_client,
        )

    def list_mess_stationen(self, **kwargs):
        """List of all measuring stations # noqa: E501
        Returns a filterable list of all measuring stations in the database. Filterable attributes are the fields of the Messstation schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_mess_stationen(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2001
                If the method is called asynchronously, returns the request
                thread.
        """
        # Fill in the generator-standard defaults before delegating to the
        # endpoint object (same pattern in every list_* method below).
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_mess_stationen_endpoint.call_with_http_info(**kwargs)

    def list_parameter(self, **kwargs):
        """List of all parameters # noqa: E501
        Returns a filterable list of all parameters in the database. Filterable attributes are the fields of the Parameter schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_parameter(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2002
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_parameter_endpoint.call_with_http_info(**kwargs)

    def list_parameter_values(self, **kwargs):
        """List of all measured values # noqa: E501
        Returns a filterable list of all measured values in the database. Filterable attributes are the fields of the ParameterValue schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_parameter_values(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2003
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_parameter_values_endpoint.call_with_http_info(**kwargs)

    def list_parameters_biologie(self, **kwargs):
        """List of all parameters in the Biologie compartment # noqa: E501
        Returns a filterable list of all parameters in the database from the Biologie compartment. Filterable attributes are the fields of the Parameter schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_parameters_biologie(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2004
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_parameters_biologie_endpoint.call_with_http_info(**kwargs)

    def list_parameters_biota(self, **kwargs):
        """List of all parameters in the Biota compartment # noqa: E501
        Returns a filterable list of all parameters in the database from the Biota compartment. Filterable attributes are the fields of the Parameter schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_parameters_biota(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2005
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_parameters_biota_endpoint.call_with_http_info(**kwargs)

    def list_parameters_sediment(self, **kwargs):
        """List of all parameters in the Sediment compartment # noqa: E501
        Returns a filterable list of all parameters in the database from the Sediment compartment. Filterable attributes are the fields of the Parameter schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_parameters_sediment(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2007
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_parameters_sediment_endpoint.call_with_http_info(**kwargs)

    def list_parameters_wasser(self, **kwargs):
        """List of all parameters in the Wasser (water) compartment # noqa: E501
        Returns a filterable list of all parameters in the database from the Wasser compartment. Filterable attributes are the fields of the Parameter schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_parameters_wasser(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2006
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_parameters_wasser_endpoint.call_with_http_info(**kwargs)

    def list_plc_stations(self, **kwargs):
        """List of all HELCOM PLC stations # noqa: E501
        Returns a filterable list of all HELCOM PLC stations in the database. Filterable attributes are the fields of the HelcomPLCStation schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_plc_stations(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse2008
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_plc_stations_endpoint.call_with_http_info(**kwargs)

    def list_projekt_stationen(self, **kwargs):
        """List of all project stations # noqa: E501
        Returns a filterable list of all project stations in the database. Filterable attributes are the fields of the ProjectStation schema. # noqa: E501
        This method makes a synchronous HTTP request by default. To make an
        asynchronous HTTP request, please pass async_req=True
        >>> thread = api.list_projekt_stationen(async_req=True)
        >>> result = thread.get()
        Keyword Args:
            filter_request (FilterRequest): [optional]
            _return_http_data_only (bool): response data without head status
                code and headers. Default is True.
            _preload_content (bool): if False, the urllib3.HTTPResponse object
                will be returned without reading/decoding response data.
                Default is True.
            _request_timeout (int/float/tuple): timeout setting for this request. If
                one number provided, it will be total request timeout. It can also
                be a pair (tuple) of (connection, read) timeouts.
                Default is None.
            _check_input_type (bool): specifies if type checking
                should be done one the data sent to the server.
                Default is True.
            _check_return_type (bool): specifies if type checking
                should be done one the data received from the server.
                Default is True.
            _host_index (int/None): specifies the index of the server
                that we want to use.
                Default is read from the configuration.
            async_req (bool): execute request asynchronously
        Returns:
            InlineResponse200
                If the method is called asynchronously, returns the request
                thread.
        """
        kwargs["async_req"] = kwargs.get("async_req", False)
        kwargs["_return_http_data_only"] = kwargs.get("_return_http_data_only", True)
        kwargs["_preload_content"] = kwargs.get("_preload_content", True)
        kwargs["_request_timeout"] = kwargs.get("_request_timeout", None)
        kwargs["_check_input_type"] = kwargs.get("_check_input_type", True)
        kwargs["_check_return_type"] = kwargs.get("_check_return_type", True)
        kwargs["_host_index"] = kwargs.get("_host_index")
        return self.list_projekt_stationen_endpoint.call_with_http_info(**kwargs)
|
class Solution(object):
    def wordPattern(self, pattern, s):
        """Check whether *s* follows *pattern* (a bijection between pattern
        characters and words).

        :type pattern: str
        :type s: str
        :rtype: bool

        Fix: the original scanned d.values() for every character (O(n) per
        lookup, O(n^2) overall); a companion set of used words makes each
        lookup O(1).
        """
        # split(' ') (not split()) is kept on purpose: consecutive spaces
        # produce empty "words", matching the original behavior.
        words = s.split(' ')
        if len(pattern) != len(words):
            return False
        char_to_word = {}
        used_words = set()  # words already bound to some pattern character
        for ch, word in zip(pattern, words):
            if ch in char_to_word:
                if char_to_word[ch] != word:
                    return False
            else:
                if word in used_words:
                    # word already mapped to a different character -> not a bijection
                    return False
                char_to_word[ch] = word
                used_words.add(word)
        return True
"""Script to pre-compile chameleon templates to the cache.
This script is useful if the time to compile chameleon templates is
unacceptably long. It finds and compiles all templates within a directory,
saving the result in the cache configured via the CHAMELEON_CACHE environment
variable.
"""
import os
import sys
import logging
import optparse
from multiprocessing import Pool
import chameleon.config
from pyramid_chameleon.zpt import PyramidPageTemplateFile
def _compile_one(args):
    """Pool worker: compile one template and report the outcome as a dict.

    *args* is the (fullpath, template_factory, fail_fast, cache_dir) tuple
    built by walk_dir(); a dict with the template path and a success flag is
    returned so that a single failure never kills the whole pool run
    (unless fail_fast is set).
    """
    fullpath, template_factory, fail_fast, cache_dir = args
    try:
        compile_one(fullpath, cache_dir, template_factory)
    except KeyboardInterrupt:
        # Treat an interrupt inside a worker as a plain failure for this path.
        return {'path': fullpath, 'success': False}
    except Exception as exc:
        if fail_fast:
            raise
        logging.error('Failed to compile: %s' % fullpath, exc_info=exc)
        return {'path': fullpath, 'success': False}
    logging.debug('Compiled: %s' % fullpath)
    return {'path': fullpath, 'success': True}
def compile_one(fullpath, cache_dir, template_factory=PyramidPageTemplateFile):
    """Compile a single template file, storing the result in *cache_dir*."""
    # Point chameleon at the target cache BEFORE instantiating the template,
    # so the compiled output is written to *cache_dir*.
    chameleon.config.CACHE_DIRECTORY = cache_dir
    template = template_factory(fullpath, macro=None)
    template.cook_check()  # forces compilation (and therefore caching)
def _walk_dir(directory, extensions):
ret = []
for dirpath, dirnames, filenames in os.walk(directory):
for filename in filenames:
if filename.startswith('.'):
continue
_, ext = os.path.splitext(filename)
if ext not in extensions:
continue
fullpath = os.path.join(dirpath, filename)
ret.append(fullpath)
return ret
def walk_dir(
    directory,
    cache_dir,
    extensions=frozenset(['.pt']),
    template_factory=PyramidPageTemplateFile,
    fail_fast=False,
    jobs=1,
):
    """Compile every matching template under *directory* with *jobs* worker
    processes, yielding one dict(path=..., success=...) per template.

    Note: pool.map blocks until all templates are processed, so results are
    yielded only after the whole batch is done.
    """
    pool = Pool(processes=jobs)
    mapped_args = [
        (fullpath, template_factory, fail_fast, cache_dir)
        for fullpath in _walk_dir(directory, extensions)
    ]
    try:
        for result in pool.map(_compile_one, mapped_args):
            yield result
    finally:
        # Always reap the workers, even if the consumer abandons the
        # generator early.
        pool.close()
        pool.join()
def precompile(argv=sys.argv):
    """Command-line entry point: compile all templates under --dir into the
    chameleon cache.

    Returns 0 if at least one template compiled successfully, 1 on a usage
    error or when nothing compiled (suitable for sys.exit()).

    Fixes vs. the original: the --cache-dir add_option call no longer ends
    in a stray trailing comma (which wrapped it in a throwaway tuple); two
    help strings regained a missing space; the error message now names the
    real option spelling (--cache-dir) and drops a doubled "the";
    "compillation" typo corrected.
    """
    parser = optparse.OptionParser(usage="""usage: %prog [options]
Compile chameleon templates, saving the results in the chameleon cache.
The CACHE_DIRECTORY environment variable MUST be set to the directory where the
templates will be stored.
By default the exit code of this script will be 0 if one template was found and
compiled.
""")
    parser.add_option(
        "--fail-fast",
        dest="fail_fast",
        default=False,
        action="store_true",
        help="Exit with non-zero exit code on the first "
             "template which fails compilation.")
    parser.add_option(
        "--dir",
        dest="dir",
        help="The directory to search for templates. "
             "Will be recursively searched")
    parser.add_option(
        "--ext",
        dest="exts",
        action="append",
        help="The file extensions to search for, "
             "can be specified more than once. "
             "The default is to look only for the .pt extension.")
    parser.add_option(
        "--loglevel",
        dest="loglevel",
        help="set the loglevel, see the logging module for possible values",
        default='INFO')
    parser.add_option(
        "--jobs",
        type=int,
        dest="jobs",
        help="set the N compile jobs",
        default=1)
    parser.add_option(
        "--cache-dir",
        dest="cache_dir",
        help="Use this directory as the Chameleon cache directory. Either "
             "this option or the CHAMELEON_CACHE environment variable "
             "must be specified")
    options, args = parser.parse_args(argv)
    loglevel = getattr(logging, options.loglevel)
    logging.basicConfig(level=loglevel)
    # Command-line option wins; fall back to the CHAMELEON_CACHE env var
    # (already folded into chameleon.config.CACHE_DIRECTORY on import).
    cache_dir = options.cache_dir or chameleon.config.CACHE_DIRECTORY
    if cache_dir is None:
        logging.error(
            "Either the --cache-dir option or the CHAMELEON_CACHE "
            "environment variable must be specified"
        )
        return 1
    if len(args) > 1:
        # args[0] is the program name; anything after it is unexpected.
        msg = ' '.join(args[1:])
        logging.error(
            'This command takes only keyword arguments, got: %s' % msg
        )
        return 1
    exts = options.exts
    if not exts:
        exts = ['.pt']
    exts = set(exts)
    success = total = 0
    for result in walk_dir(
        options.dir,
        cache_dir,
        extensions=exts,
        fail_fast=options.fail_fast,
        jobs=options.jobs,
    ):
        total += 1
        if result['success']:
            success += 1
    logging.info('Compiled %s out of %s found templates' % (success, total))
    if not success:
        logging.error(
            "No templates successfully compiled out of %s found" % total
        )
        return 1
    return 0
if __name__ == '__main__':  # pragma: no cover
    # Propagate precompile()'s 0/1 result as the process exit code.
    sys.exit(precompile())
|
# Generated by Django 3.1.7 on 2021-04-19 04:23
import datetime
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated Django migration for the hoosfit app (2021-04-19)."""

    dependencies = [
        ('hoosfit', '0012_auto_20210419_0023'),
    ]
    operations = [
        migrations.AlterField(
            model_name='profile',
            name='previous_workout',
            # NOTE(review): the default is frozen at the date this migration
            # was generated (2021-04-18). If a "today" default was intended,
            # the model field should use a callable (e.g. datetime.date.today)
            # instead of a fixed date — confirm against the model definition.
            field=models.DateField(default=datetime.date(2021, 4, 18)),
        ),
    ]
|
from streamer import streamer
import cv2
# redis server host:port
HOST = "0.0.0.0"
PORT = 6379
# webcam ID
DEVICE = 0
WIDTH = 1280
HEIGHT = 720
QUALITY = 70  # JPEG quality used when broadcasting frames
s = streamer(host=HOST, port=PORT)
cap = cv2.VideoCapture(DEVICE)
cap.set(3,WIDTH)   # property 3 = CAP_PROP_FRAME_WIDTH
cap.set(4,HEIGHT)  # property 4 = CAP_PROP_FRAME_HEIGHT
#mjpeg
cap.set(6,1196444237.0)  # property 6 = CAP_PROP_FOURCC; 1196444237 == fourcc('MJPG')
while True:
    ret, frame = cap.read()
    if not ret:
        # camera disconnected or no frame available — stop streaming
        break
    # TODO function(frame)
    cv2.imshow("client Webcam", frame)
    s.broadcast(frame, quality=QUALITY)  # push the frame to the redis streamer
    key = cv2.waitKey(1)
    if key == ord("q"):  # 'q' quits the preview/stream loop
        break
cap.release()
cv2.destroyAllWindows()
import sys
import numpy as np
def squareMatrixBenchmark(n):
    """Multiply two random n-by-n matrices and return the product.

    The original discarded the product (dead store); returning it keeps the
    benchmark behavior (callers may ignore the value) while making the
    function testable and reusable.

    :param n: matrix dimension
    :return: the (n, n) product matrix
    """
    m1 = np.random.random([n, n])
    m2 = np.random.random([n, n])
    return m1 @ m2
if __name__ == "__main__":
    if len(sys.argv) != 2:
        print("usage: python numpyBenchmark.py n")
        # Bug fix: the bare exit() reported success (status 0) on a usage
        # error; exit non-zero so shell scripts can detect misuse.
        sys.exit(1)
    n = int(sys.argv[1])
    np.random.seed(0)  # fixed seed for reproducible benchmark inputs
    squareMatrixBenchmark(n)
|
import re
import csv
import matplotlib.pyplot as plt
import numpy
import operator
import epubreader as er
import rangefreq as rf
import multiprocessing as mp
def comp_vari(freqlist):
    """Return the coefficient of variation (std / mean) of *freqlist*.

    Returns 0 when the mean is zero to avoid dividing by zero.
    """
    arr = numpy.array(freqlist)
    mean = numpy.mean(arr)
    return numpy.std(arr) / mean if mean != 0 else 0
def even_word(leng):
    """Rank every word in the text by how evenly it is distributed.

    The full text (locations [1,1,1] through [100,100,100]) is split into
    chunks of *leng* words; each word gets the coefficient of variation of
    its per-chunk frequencies. Returns (word, cv) pairs sorted ascending,
    i.e. most evenly distributed first.
    """
    start = er.Location([1, 1, 1])
    end = er.Location([100, 100, 100])
    full_text = er.get_range_text(er.filenum, start, end)[0]
    counts = rf.word_count(full_text)
    # split the text into chunks of length leng
    chunks = rf.cut_text(full_text, leng)
    freqs, freqsarr = rf.all_word_freq(chunks, counts, leng)
    variation = {word: comp_vari(rf.word_freq(word, freqsarr))
                 for word in freqs}
    return sorted(variation.items(), key=operator.itemgetter(1))
def even_nword(leng, ngram):
    """Rank every *ngram*-gram by evenness of its distribution over the text.

    *leng* is either a chunk length in words or the string 'book' (which is
    mapped to chunk length 1000 with book-based frequencies). Returns three
    rankings sorted by ascending coefficient of variation; depending on the
    branch taken, either the first or the last two are non-empty.
    """
    ## get ten most evenly distributed words and variated ones
    loc0 = er.Location([1, 1, 1])
    loc1 = er.Location([100, 100, 100])
    text, punc_text = er.get_range_text(er.filenum, loc0, loc1)
    count = rf.word_count(text)
    ## cut texts into chunks of length leng
    nwordcv = dict()
    nwordcv0 = dict()
    nwordcv1 = dict()
    ## compute coefficient of variation based on book or word numbers
    if leng == 'book':
        # 'book' mode: fixed 1000-word chunks plus book-level frequencies.
        leng = 1000
        textarr = rf.cut_text(text, leng)
        freqs, freqsarr0, freqsarr1 = rf.all_nword_freq(textarr, text, punc_text, ngram, leng)
        freqsarr = rf.all_nword_freq_book(freqs, ngram)
        for nword in freqs.keys():
            nwordcv[nword] = comp_vari(rf.word_freq(nword, freqsarr))
    else:
        # numeric mode: two frequency variants from all_nword_freq
        # (presumably with/without punctuation — confirm in rangefreq).
        textarr = rf.cut_text(text, leng)
        freqs, freqsarr0, freqsarr1 = rf.all_nword_freq(textarr, text, punc_text, ngram, leng)
        for nword in freqs.keys():
            nwordcv0[nword] = comp_vari(rf.word_freq(nword, freqsarr0))
            nwordcv1[nword] = comp_vari(rf.word_freq(nword, freqsarr1))
    # Only the dict(s) filled above yield non-empty rankings; the rest sort
    # to empty lists.
    rank_nword = sorted(nwordcv.items(), key = operator.itemgetter(1), reverse = False)
    rank_nword0 = sorted(nwordcv0.items(), key = operator.itemgetter(1), reverse = False)
    rank_nword1 = sorted(nwordcv1.items(), key = operator.itemgetter(1), reverse = False)
    return rank_nword, rank_nword0, rank_nword1
def csv_writer(ranks, csvfile):
## write csv into an opened file, return top_even and top_uneven
rankwriter = csv.writer(csvfile, delimiter=',')
top_even = []
top_uneven = []
num = 0
print '\nTop 25 word or phrase that are evenly distributed:'
for rank in ranks:
if rank[1]:
num += 1
nword = str(rank[0])
top_even.append(nword)
print nword
if num > 25:
break
print '\nTop 25 word or phrase that are unevenly distributed:\n'
for rank in ranks[-25: ]:
nword = str(rank[0])
top_uneven.append(nword)
print nword
for rank in ranks:
if rank[1]:
rankwriter.writerow([str(rank[0]), rank[1]])
return top_even, top_uneven
def rank_writer(leng, ngram):
    ## leng can either be word number or string 'book'
    ## Compute the three rankings and dump each non-empty one to its csv.
    rankings = even_nword(leng, ngram)
    prefixes = ('../doc/rank_nword/rank_nword_',
                '../doc/rank0_nword/rank0_nword_',
                '../doc/rank1_nword/rank1_nword_')
    for prefix, ranking in zip(prefixes, rankings):
        if any(ranking):
            with open(prefix + str(leng) + '.csv', 'wb') as csvfile:
                csv_writer(ranking, csvfile)
    return
def multicore_rank_writer(core, leng, ngram=1):
    ## Run rank_writer for every chunk length in `leng` on `core` workers.
    ## Bug fix: rank_writer takes (leng, ngram) but the bare pool.map only
    ## supplied one argument and raised TypeError; ngram is now bound
    ## explicitly (default 1 keeps the old call signature working).
    from functools import partial
    pool = mp.Pool(processes = core)
    try:
        pool.map(partial(rank_writer, ngram=ngram), leng)
    finally:
        # Release the worker processes even if a task raises.
        pool.close()
        pool.join()
    return
if __name__ == "__main__":
    # Single-process run on whole-book chunks; the multiprocessing sweep
    # over chunk sizes is kept below for reference.
    # multicore_rank_writer(6, numpy.arange(1000,11000,1000))
    rank_writer('book', 1)
|
# -*- coding: utf-8 -*-
from django.conf import settings
if settings.DATABASE_ENGINE=='pool':
    # When the configured engine is the pooling shim, swap in the real
    # backend it wraps before Django opens any connections.
    settings.DATABASE_ENGINE=settings.POOL_DATABASE_ENGINE
from django.core.management.base import BaseCommand, CommandError
import os
import time
import sys
from mysite.iclock.constant import REALTIME_EVENT, DEVICE_POST_DATA
import thread
import datetime
def process_writedata_handler():
    """Loop forever, draining queued device data via process_writedata().

    The DB connection is closed after every pass so that a dropped network
    link cannot leave a stale connection that blocks further processing.
    """
    from mysite.iclock.models.model_cmmdata import process_writedata
    from django.db import connection as conn
    while True:
        process_writedata()
        try:
            # Guard against a dropped network link: close the connection
            # rather than leaving a stale one that cannot process data.
            cur=conn.cursor()
            cur.close()
            conn.close()
        except:
            pass
def process_upload_handler():
    ## Queue an ACCOUNT command on every device, reporting its transaction
    ## count over the trailing three days.
    from mysite.iclock.models.model_device import Device
    from mysite.iclock.models.model_devcmd import DevCmd
    from mysite.iclock.models.model_trans import Transaction
    today = datetime.date.today()
    startTime = today - datetime.timedelta(days=3)
    EndTime = today
    for dev in Device.objects.all():
        trans = Transaction.objects.filter(sn_name=dev.sn, TTime__range=(startTime, EndTime))
        cmdStr = "ACCOUNT Start=%s End=%s Count=%s" % (str(startTime), str(EndTime), str(len(trans)))
        try:
            cmd = DevCmd(SN=dev, CmdContent=cmdStr, CmdCommitTime=datetime.datetime.now())
            cmd.save(force_insert=True)
        except:
            import traceback
            traceback.print_exc()
class Command(BaseCommand):
    option_list = BaseCommand.option_list + ()
    help = "Starts write data process."
    args = ''
    def handle(self, *args, **options):
        """Either sync from a central server (SYNC_MODEL) or run the local
        write-data loop plus a daily upload job started around 3 o'clock."""
        from base.sync_api import SYNC_MODEL
        if SYNC_MODEL:
            from server_update import update_user,update_device, clean_cache
            from base.sync_api import server_update_data as ser_update
            last_clean = datetime.datetime.now()
            while True:
                now = datetime.datetime.now()
                # Flush caches at most once an hour, off the main loop.
                # NOTE(review): .seconds wraps every 24 h; total_seconds()
                # is likely intended — confirm before changing.
                if (now - last_clean).seconds>3600:
                    thread.start_new_thread(clean_cache,())
                    last_clean = now
                ret = ser_update()
                emps = ret['employee']
                devs = ret['device']
                update_user(emps)
                update_device(devs)
                time.sleep(1)
        else:
            # Background thread drains queued device data forever.
            thread.start_new_thread(process_writedata_handler,())
            first = True
            while True:
                if first:
                    # Poll each minute until the first 3 o'clock pass,
                    # then switch to a fixed 24 h cadence.
                    h = datetime.datetime.now().hour
                    if h==3:
                        thread.start_new_thread(process_upload_handler,())
                        first = False
                    else:
                        time.sleep(60)
                else:
                    thread.start_new_thread(process_upload_handler,())
                    time.sleep(60*60*24)
|
# -*- coding: utf-8 -*-
from __future__ import print_function
from matplotlib import pyplot as plt
import matplotlib.image as mpimg
import numpy as np
import scipy.misc
import random
import os
import imageio
#############################
# global variables #
#############################
root_dir = "/home/water/DATA/camvid-master"
data_dir = os.path.join(root_dir, "701_StillsRaw_full") # train data
label_dir = os.path.join(root_dir, "LabeledApproved_full") # train label
label_colors_file = os.path.join(root_dir, "label_colors.txt") # color to label
val_label_file = os.path.join(root_dir, "val.csv") # validation file
train_label_file = os.path.join(root_dir, "train.csv") # train file
# create dir for label index
label_idx_dir = os.path.join(root_dir, "Labeled_idx")
if not os.path.exists(label_idx_dir):
    os.makedirs(label_idx_dir)
# Lookup tables filled in by parse_label(): color <-> label <-> class index.
label2color = {}
color2label = {}
label2index = {}
index2label = {}
def divide_train_val(val_rate=0.1, shuffle=True, random_seed=None):
    """Split the CamVid stills into train/val and write train.csv / val.csv.

    Each csv row maps a raw image path to its per-pixel label-index file
    (<name>_L.png.npy, produced later by parse_label()).

    Args:
        val_rate: fraction of images assigned to the validation split.
        shuffle: randomise the assignment when True.
        random_seed: optional seed for reproducible splits.
    """
    data_list = os.listdir(data_dir)
    data_len = len(data_list)
    val_len = int(data_len * val_rate)
    if random_seed:
        random.seed(random_seed)
    if shuffle:
        # A full-length sample is a random permutation of the indices.
        data_idx = random.sample(range(data_len), data_len)
    else:
        data_idx = list(range(data_len))
    val_idx = [data_list[i] for i in data_idx[:val_len]]
    train_idx = [data_list[i] for i in data_idx[val_len:]]

    def _write_split(csv_path, names):
        # Bug fix: the csv files were opened with open() and never closed;
        # `with` guarantees the handles are flushed and released.  Also
        # removes the duplicated val/train writing loops.
        with open(csv_path, "w") as out:
            out.write("img,label\n")
            for name in names:
                if 'png' not in name:  # skip corrupted / non-image entries
                    continue
                img_name = os.path.join(data_dir, name)
                lab_name = os.path.join(label_idx_dir, name)
                lab_name = lab_name.split(".")[0] + "_L.png.npy"
                out.write("{},{}\n".format(img_name, lab_name))

    _write_split(val_label_file, val_idx)
    _write_split(train_label_file, train_idx)
#parse:分析 分析标签
def parse_label():
    """Convert every RGB label image into a per-pixel class-index array.

    First reads label_colors.txt ("R G B<tab>Label" per line) to build the
    module-level color <-> label <-> index maps, then for each image in
    label_dir writes <name>.npy into label_idx_dir where each cell holds
    the class index of that pixel's color.
    """
    # Bug fix: the color-table file was opened and never closed.
    with open(label_colors_file, "r") as fh:
        lines = fh.read().split("\n")[:-1]  # ignore the last empty line
    for idx, line in enumerate(lines):
        label = line.split()[-1]                           # class name
        color = tuple([int(x) for x in line.split()[:-1]]) # (r, g, b)
        print(label, color)
        label2color[label] = color
        color2label[color] = label
        label2index[label] = idx
        index2label[idx] = label
    for idx, name in enumerate(os.listdir(label_dir)):
        filename = os.path.join(label_idx_dir, name)
        if os.path.exists(filename + '.npy'):
            print("Skip %s" % (name))  # already parsed in a previous run
            continue
        print("Parse %s" % (name))
        img = os.path.join(label_dir, name)
        img = imageio.imread(img)      # (height, width, 3) label image
        height, weight, _ = img.shape
        idx_mat = np.zeros((height, weight))
        for h in range(height):
            for w in range(weight):
                color = tuple(img[h, w])
                # Narrowed from a bare except: an unrecognised color is
                # the only expected failure here; anything else should
                # surface instead of being swallowed.
                try:
                    label = color2label[color]
                    index = label2index[label]
                    idx_mat[h, w] = index
                except KeyError:
                    print("error: img:%s, h:%d, w:%d" % (name, h, w))
        idx_mat = idx_mat.astype(np.uint8)
        np.save(filename, idx_mat)
        print("Finish %s" % (name))
'''debug function'''
def imshow(img, title=None):
    ## Display `img`: a file path (read via matplotlib.image) or an
    ## already-loaded array (falls back to nearest-neighbour rendering).
    try:
        plt.imshow(mpimg.imread(img))
    except:
        plt.imshow(img, interpolation='nearest')
    if title is not None:
        plt.title(title)
    plt.show()
if __name__ == '__main__':
    # Build the csv splits first, then generate the per-pixel index maps.
    print("it starts working")
    divide_train_val(random_seed=1)
    parse_label()
    print("process finished")
"""
This file is for test blockus_data.py
"""
import numpy as np
import torch as tr
import unittest as ut
import blockus_data as bd
import blockus_game as bg
class BlockusDataTestCase(ut.TestCase):
    """Unit tests for blockus_data's state encoding and batch generation."""
    def test_encode(self):
        # Fresh 2x2 board: channel 0 ("empty", presumably) is all ones.
        state = bg.initial_state(board_size=2)
        actual = bd.encode(state)
        expected = tr.zeros(3,2,2)
        expected[0,:,:] = 1.
        self.assertTrue(tr.allclose(actual, expected))
        # After the first action, cell (0,0) moves from channel 0 to 1.
        state = state.perform(state.valid_actions()[0])
        actual = bd.encode(state)
        expected[:2,0,0] = tr.tensor([0.,1.])
        self.assertTrue(tr.allclose(actual, expected))
        # After the reply, cell (1,1) moves from channel 0 to 2.
        state = state.perform(state.valid_actions()[0])
        actual = bd.encode(state)
        expected[[0, 2],1,1] = tr.tensor([0.,1.])
        self.assertTrue(tr.allclose(actual, expected))
    def test_get_batch(self):
        # A single rollout scores every visited position 0.
        inputs, outputs = bd.get_batch(
            board_size=2, polyomino_size=2, num_games=1, num_rollouts=1, max_depth=2,
            choose_method=lambda node: node.children()[0])
        self.assertTrue(tr.allclose(outputs, tr.zeros(6,1)))
        # Seven rollouts with least-visited child selection yield fixed
        # utilities (deterministic given the tiny board).
        inputs, outputs = bd.get_batch(
            board_size=2, polyomino_size=2, num_games=1, num_rollouts=7, max_depth=2,
            choose_method=lambda node: node.children()[np.argmin(node.get_visit_counts())])
        self.assertTrue(tr.allclose(outputs, tr.tensor([[-2/3,1/2,1/2,-1,0]]).t()))
        expected = tr.zeros(3,2,2)
        expected[0] = 1.
        expected[:2,0,0] = tr.tensor([0, 1])
        self.assertTrue(tr.allclose(inputs[0], expected))
if __name__ == "__main__":
    # Run the suite directly and print a compact score line.
    suite = ut.TestLoader().loadTestsFromTestCase(BlockusDataTestCase)
    result = ut.TextTestRunner(verbosity=2).run(suite)
    total = result.testsRun
    errs = len(result.errors)
    fails = len(result.failures)
    print("score: %d of %d (%d errors, %d failures)" % (total - (errs+fails), total, errs, fails))
|
class Course:
    """A named course that tracks whether it has been graduated."""

    def __init__(self, name):
        self.name = name        # course title
        self.graduated = False  # flipped to True by graduateCourse()

    def graduateCourse(self):
        """Mark this course as graduated."""
        self.graduated = True

    def getGraduateStatus(self):
        """Return True once graduateCourse() has been called."""
        return self.graduated

    def getCourseName(self):
        """Return the course title."""
        return self.name
# BASIC DATA TYPES
# STRINGS:
# in python, there are certain ways to express information like numbers and words
# to express letters or words use single or double quotation marks around the words.
# If you don't use single or double quotation marks python will assume that you're just writing a variable name
# and will throw an error if that variable doesn't exist
# this data type is called a string
# SIDENOTE: don't use single and double quotation marks in the same file
# or else your code will be ugly and no one will want to be its friend :(
# SIDENOTE: numbers and other symbols can also be in quotes and can be considered letters/words
# EXAMPLES:
"a" # valid way to express the leter a
"my words" # a valid way to express words
'my words' # also a valid way to express words
"my words123 456" # a valid way to express words with some numbers (note: the numbers here are treated as parts of words)
# "my words' # an invalid way to express words
# my words # an invalid way to express words
# "my words # an invalid way to express words
# EXERCISES:
# write 5 strings below and make sure this file still runs
# NUMBERS:
# there isn't any special notation to express numbers, just don't write them in quotes or else they'll
# be considered words/characters
# EXAMPLES:
1 # a valid way to express 1
2394 # a valid way to express 2394
"1" # not an invalid data type, but an invalid way to express a number that is supposed to be used as a number
# EXERCISES:
# write 5 numbers below and make sure this file still runs
# BOOLEANS:
# one of the things python can do is check if something is true or false
# you can also write code that will always be true or false to python by just writing
# True or False (with capital T and F)
# an expression of something being true or false is called a boolean in all programming languages
# and it's an important thing to know even though it sounds kind of esoteric
# SIDENOTE: Frue and Talse are not valid booleans
# EXAMPLES:
True # a valid way to express True
False # a valid way to express false
# Talse # an invalid way to express schrödinger's booleans
# Frue # an invalid way to express schrödinger's boolean
# EXERCISES:
# write 5 booleans below and make sure this file still runs
# LISTS(also called arrays in other programming languages):
# lists are pretty self explanatory
# lists can store any and as many data types as you want (they can even store lists)
# you can also add, remove, and get items from a lists
# there's a lot more to lists but writing the data type is pretty easy
# to write a list, write data types inside square brackets "[]" and separate them with commas
# SIDENOTE: a list of lists is called a 2 dimensional list, a list made of lists that are made of lists is a 3 dimensional list etc..
# EXAMPLES:
["my words", "a", 234, False, True] # a valid list
[] # a valid list
["a"] # a valid lsit
[[1,2,3], ["456", "789"]] # a valid list
# EXERCISES:
# write 5 lists with all of the data types you've learned so far (including lists). Make sure this file still runs
# ENDNOTE:
# these are only the basic datatypes in python. There are a couple other datatypes that aren't included here (mainly dictionaries)
# that you'll discover later
|
from Stack import Stack
def infToPost(exp):
    """Convert a space-delimited infix expression to postfix notation.

    Operands are single uppercase letters or digit strings; supported
    operators are + - * / ** and parentheses.

    Bug fixes vs. the original:
      * left-associative operators of EQUAL precedence are now popped,
        so "A - B - C" -> "A B - C -" (was "A B C - -", i.e. A-(B-C));
      * ** binds tighter than * and / (was given equal precedence) and
        stays right-associative.
    """
    posexp = []
    opstack = []  # a plain list serves as the operator stack
    prec = {"(": 1, "+": 2, "-": 2, "*": 3, "/": 3, "**": 4}
    tokenList = exp.split()
    print(tokenList)
    for token in tokenList:
        if token in "ABCDEFGHIJKLMNOPQRSTUVWXYZ" or token in "1234567890":
            posexp.append(token)
        elif token == "(":
            opstack.append(token)
        elif token == ")":
            toptok = opstack.pop()
            while toptok != "(":
                posexp.append(toptok)
                toptok = opstack.pop()
        else:
            # Pop operators that bind at least as tightly; for the
            # right-associative ** only strictly tighter ones are popped.
            while opstack and (prec[opstack[-1]] > prec[token]
                               or (prec[opstack[-1]] == prec[token] and token != "**")):
                posexp.append(opstack.pop())
            opstack.append(token)
    while opstack:
        posexp.append(opstack.pop())
    return " ".join(posexp)
print(infToPost("5 * 3 ** ( 4 - 2 )"))
|
#!/usr/bin/env python
def r8lib_test ( ):

#*****************************************************************************80
#
## R8LIB_TEST tests the R8LIB library.
#
#  Each test lives in module <name> as function <name>_test; the modules
#  are imported and their tests invoked in order by a table-driven loop.
#
#  Bug fix: the original imported r8mat_norm_li_test and
#  r8vec_indicator1_test but never called them; a single name table makes
#  such a drop impossible.
#
#  Licensing:
#
#    This code is distributed under the GNU LGPL license.
#
#  Modified:
#
#    24 May 2015
#
#  Author:
#
#    John Burkardt
#
  import importlib

  test_names = [
    'agm_values', 'gamma_values', 'gamma_log_values',
    'i4_log_10', 'i4_sign', 'i4_uniform_ab',
    'i4vec_indicator0', 'i4vec_indicator1', 'i4vec_print',
    'i4vec_transpose_print',
    'perm0_check', 'perm0_uniform', 'perm1_check', 'perm1_uniform',
    'r8_abs', 'r8_acos', 'r8_acosh', 'r8_add', 'r8_agm', 'r8_asinh',
    'r8_atan', 'r8_atanh', 'r8_big', 'r8_cas', 'r8_ceiling',
    'r8_choose', 'r8_cosd', 'r8_cotd', 'r8_cscd', 'r8_cube_root',
    'r8_diff', 'r8_digit', 'r8_e', 'r8_epsilon', 'r8_epsilon_compute',
    'r8_factorial', 'r8_factorial_values', 'r8_factorial2',
    'r8_factorial2_values', 'r8_fall', 'r8_fall_values',
    'r8_fractional', 'r8_gamma', 'r8_gamma_log', 'r8_huge',
    'r8_log_2', 'r8_log_b', 'r8_mant', 'r8_max', 'r8_min', 'r8_mod',
    'r8_modp', 'r8_mop', 'r8_nint', 'r8_normal_01', 'r8_normal_ab',
    'r8_pi', 'r8_power', 'r8_power_fast', 'r8_rise', 'r8_rise_values',
    'r8_round2', 'r8_roundb', 'r8_roundx', 'r8_secd', 'r8_sign',
    'r8_sind', 'r8_swap', 'r8_swap3', 'r8_tand', 'r8_to_i4',
    'r8_to_r8_discrete', 'r8_uniform_01', 'r8_uniform_ab',
    'r8_walsh_1d', 'r8_wrap',
    'r82col_print_part', 'r82row_print_part', 'r83col_print_part',
    'r83row_print_part', 'r8col_swap',
    'r8mat_house_axh', 'r8mat_house_form', 'r8mat_indicator',
    'r8mat_mm', 'r8mat_mtm', 'r8mat_mtv', 'r8mat_mv', 'r8mat_nint',
    'r8mat_nonzeros', 'r8mat_norm_fro', 'r8mat_norm_l1',
    'r8mat_norm_li', 'r8mat_print', 'r8mat_print_some', 'r8mat_sub',
    'r8mat_transpose', 'r8mat_transpose_print',
    'r8mat_transpose_print_some', 'r8mat_uniform_01',
    'r8mat_uniform_ab',
    'r8poly_degree', 'r8poly_print', 'r8poly_value_horner',
    'r8vec_amax', 'r8vec_amin', 'r8vec_asum', 'r8vec_concatenate',
    'r8vec_copy', 'r8vec_direct_product', 'r8vec_house_column',
    'r8vec_indicator0', 'r8vec_indicator1', 'r8vec_linspace',
    'r8vec_max', 'r8vec_mean', 'r8vec_min', 'r8vec_nint',
    'r8vec_norm_l0', 'r8vec_norm_l2', 'r8vec_norm_li',
    'r8vec_permute', 'r8vec_permute_uniform', 'r8vec_print',
    'r8vec_product', 'r8vec_sum', 'r8vec_uniform_01',
    'r8vec_uniform_ab', 'r8vec_variance',
    'roots_to_r8poly', 'timestamp',
  ]

  print ( '' )
  print ( 'R8LIB_TEST' )
  print ( '  Python version:' )
  print ( '  Test the R8LIB library.' )

  for name in test_names:
    module = importlib.import_module ( name )
    getattr ( module, name + '_test' ) ( )
#
#  Terminate.
#
  print ( '' )
  print ( 'R8LIB_TEST:' )
  print ( '  Normal end of execution.' )
if ( __name__ == '__main__' ):
  # Timestamp before and after so the log records the run duration.
  from timestamp import timestamp
  timestamp ( )
  r8lib_test ( )
  timestamp ( )
|
from django.urls import path
from .views import launch_giveaway
app_name = 'giveaway'
# Routes <prefix>/giveaway/launch to the launch_giveaway view under the
# 'giveaway' URL namespace.
urlpatterns = [
    path('giveaway/launch', launch_giveaway, name='launch_giveaway')
]
|
import os
DIR = '../../fluffed_data/news/fluffed'
str_lookup = input('What are you looking for? ')
# Bug fix: the file contents were lowercased but the query was not, so any
# query containing an uppercase letter could never match.
needle = str_lookup.lower()
for f in os.listdir(DIR):
    file_path = os.path.join(DIR, f)
    with open(file_path, 'rb') as text:
        content = text.read().decode('ascii')
    if needle in content.lower():
        print(content)
        if input('is this what you want? (y/n) ') == 'y':
            break
|
#https://www.hackerrank.com/challenges/the-grid-search
T = int(input())
for _ in range(T):
    R, C = map(int, input().split())
    G = list(list(map(int, input())) for i in range(R))
    r, c = map(int, input().split())
    P = list(list(map(int, input())) for i in range(r))
    found = False
    # Bug fix: only anchor the pattern where it fits entirely inside the
    # grid.  The original iterated i over range(R) and indexed G[i+k],
    # raising IndexError whenever the top-left cell matched within r-1
    # rows of the bottom edge.
    for i in range(R - r + 1):
        for j in range(C - c + 1):
            if G[i][j] == P[0][0]:
                found = True
                for k in range(r):
                    if G[i+k][j:j+c] != P[k][:]:
                        found = False
                        break
                if found: break
        if found: break
    if found: print("YES")
    else: print("NO")
|
__author__ = '184766'
"""
Multiple inheritance example 2 demonstrates the diamond inheritance pattern ....breadth first search is used because
a diamond inheritance pattern creates ambiguity
"""
class A(object):
    # Root of the diamond.
    def dothis(self):
        print "doing this in A"
class B(A):
    # Left branch: inherits dothis from A.
    pass
class C(A):
    # Right branch: overrides dothis.
    def dothis(self):
        print "doing this in C"
class D(B,C): # diamond tip; MRO is D, B, C, A
    pass
d_instance = D()
d_instance.dothis()  # resolves to C.dothis via the MRO (B defines none)
print D.mro() # Prints out the method resolution order "mro"
|
import unittest
from problem import *
class Test(unittest.TestCase):
    def tests(self):
        # NOTE(review): the expected values suggest add() sums digit
        # columns without carrying (16+18 -> 1+1=2, 6+8=14 -> 214);
        # confirm against problem.add's specification.
        self.assertEqual(add(2,11), 13)
        self.assertEqual(add(0,1), 1)
        self.assertEqual(add(0,0), 0)
        self.assertEqual(add(16,18), 214)
        self.assertEqual(add(26,39), 515)
        self.assertEqual(add(122,81), 1103)
if __name__ == '__main__':
    unittest.main()
|
from osgeo import ogr
import matplotlib.pyplot as plt
# Plot 2001 vs 2015 depth profiles for each named beach profile and save
# each figure as a PNG.
source = ogr.Open('/home/nlibassi/Geodesy/Thesis/Project/Vector/ITRF96TM30/ProfilePoints/2001_2015_ProfilePtsEdited.shp')
layer = source.GetLayer()
# Values of the shapefile's 'Filename' attribute to plot, one per profile.
profNames = ['pr0000106', 'pr4000106', 'pr3000106', 'pr7770106', 'pr7000106', 'pr2000106', 'pr6000106', 'pr1000106', 'pr5000106']
hOffset = 0.891 #mean of ellipsoidal hts of shoreline pts in 2015 (m)
for pname in profNames:
    distList = []
    elevOrigList = []
    elevNewList = []
    diff11mList = []  # NOTE(review): never populated or read — appears unused.
    layer.SetAttributeFilter("Filename = " + "'" + pname + "'")
    for feature in layer:
        distList.append(feature.GetField('DistanceTo'))
        elevOrig = feature.GetField('Elevation_')
        elevNew = feature.GetField('Depth2015')
        elevOrigList.append(elevOrig)
        # 2015 depths are reduced by the shoreline offset; gaps stay None
        # so matplotlib breaks the line there.
        if feature.GetField('Depth2015') is not None:
            elevNewList.append(elevNew - hOffset)
        else:
            elevNewList.append(None)
    plt.figure()
    plt.plot(distList, elevOrigList)
    plt.plot(distList, elevNewList)
    plt.suptitle('Profile ' + pname[2:5], fontsize=14)
    plt.xlabel('Distance from Benchmark (m)')
    plt.ylabel('Depth (m)')
    plt.legend(['June 2001', 'Dec 2015'], loc='upper right')
    #plt.show()
    plt.savefig('/home/nlibassi/Geodesy/Thesis/Project/ProfileData/Plots/Prof' + pname[2:5] + '_2001_2015_hOffset.png')
|
# Write a program that accepts sequence of lines as
# input and prints the lines after making all characters
# in the sentence capitalized.
lines = []
while True:
s = raw_input()
if s:
lines.append(s.upper())
else:
break
for sentence in lines:
print sentence
|
#!/usr/bin/env python3
# A simple http server that accepts GET and POST requests sendt as JSON data
# It will write the "hostname" and "flag" fields from JSON to a txt-file
from http.server import BaseHTTPRequestHandler, HTTPServer
from urllib.parse import urlparse
import json
class RequestHandler(BaseHTTPRequestHandler):
    """Echo request metadata as JSON; POSTs additionally log flag data."""
    def do_GET(self):
        # Respond 200 with a JSON summary of the request line.
        parsed_path = urlparse(self.path)
        self.send_response(200)
        self.end_headers()
        self.wfile.write(json.dumps({
            'method': self.command,
            'path': self.path,
            'real_path': parsed_path.query,
            'query': parsed_path.query,
            'request_version': self.request_version,
            'protocol_version': self.protocol_version
        }).encode())
        return
    def do_POST(self):
        # Body must be JSON; it is echoed back and its "hostname"/"flag"
        # fields appended to flags.txt.
        content_len = int(self.headers.get('content-length'))
        post_body = self.rfile.read(content_len)
        data = json.loads(post_body)
        parsed_path = urlparse(self.path)
        self.send_response(200)
        self.end_headers()
        self.wfile.write(json.dumps({
            'method': self.command,
            'path': self.path,
            'real_path': parsed_path.query,
            'query': parsed_path.query,
            'request_version': self.request_version,
            'protocol_version': self.protocol_version,
            'body': data
        }).encode())
        # NOTE(review): a body without "hostname"/"flag" raises KeyError
        # after the response has been sent — confirm that is acceptable.
        with open('flags.txt', 'a') as flagfile:
            flagfile.write(data['hostname'] + ' ' + data['flag'] + '\n')
        return
if __name__ == '__main__':
    # Listen on all interfaces, port 8000; serve_forever() blocks.
    server = HTTPServer(('0.0.0.0', 8000), RequestHandler)
    print('Starting server at http://0.0.0.0:8000')
    server.serve_forever()
|
import sys
import webbrowser
from urllib.parse import quote_plus

# Open a YouTube search for the words given on the command line.
if len(sys.argv) < 2:
    sys.exit(0)

youtube = 'https://www.youtube.com/results?search_query='
# Bug fix: joining with '+' left characters such as '&', '=' and '#'
# unescaped, corrupting the query string; quote_plus percent-encodes them
# and still renders spaces as '+'.
it = quote_plus(' '.join(sys.argv[1:]))
webbrowser.open(youtube + it)
|
from django import forms
from . import models
class formcreate(forms.Form):
    # Plain (non-model) form: serial number, name, and a free-text URL.
    sno = forms.IntegerField()
    name = forms.CharField()
    # NOTE(review): a URLField rendered as a Textarea is unusual — confirm
    # this is intended rather than a CharField.
    testtext = forms.URLField(widget=forms.Textarea)
class modelFormcreate(forms.ModelForm):
    # ModelForm exposing every field of models.User.
    class Meta:
        model=models.User
        fields = "__all__"
import unittest
import os
from utils.rbag.joints import JointsBagLoader, JointsBagSaver
from utils.pdt.trajectory import PtpTrajectory
class Test(unittest.TestCase):
    """Round-trip tests for the joints rosbag loader/saver helpers."""
    def test_loadJoints(self):
        # The checked-in bag must decode to a PtpTrajectory.
        loader = JointsBagLoader()
        loader.read(os.path.dirname(os.path.realpath(__file__)) + "/ptp-trajectory.bag")
        self.assertIsInstance(loader.data(), PtpTrajectory)
    def test_saveJoints(self):
        # Write two 7-joint states; only checks the write path raises nothing.
        saver = JointsBagSaver()
        saver.addJointStates([1., 2., 3., 4., 5., 6., 7.])
        saver.addJointStates([2., 4., 6., 8., 10., 12., 14.])
        saver.write(os.path.dirname(os.path.realpath(__file__)) + "/ptp-trajectory.temp1.bag")
    def test_saveTrajectory(self):
        # Same as above but through a pre-built PtpTrajectory.
        ptpTraj = PtpTrajectory()
        ptpTraj.addJoints([1., 2., 3., 4., 5., 6., 7.])
        ptpTraj.addJoints([7., 6., 5., 4., 3., 2., 1])
        saver = JointsBagSaver()
        saver.setTrajectory( ptpTraj )
        saver.write(os.path.dirname(os.path.realpath(__file__)) + "/ptp-trajectory.temp2.bag")
if __name__ == "__main__":
    unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import load_library
from tensorflow.python.platform import resource_loader
# Load the custom-op shared library shipped next to this module and
# re-export its greedy_assignment op as a plain function.
greedy_assignment_ops = load_library.load_op_library(
    resource_loader.get_path_to_datafile('_greedy_assignment_ops.so'))
greedy_assignment = greedy_assignment_ops.greedy_assignment
|
#!/usr/bin/env python
"""
Project_Name: main, File_name: master_alt_mpi
Aufthor: kalabharath, Email: kalabharath@gmail.com
Date: 3/03/18 , Time:10:05 AM
"""
# sys.path.append('../../main/')
import argparse
import time
import traceback
from mpi4py import MPI
from ranking.NoeStageRank import *
import alt_smotif_search as alt_search
import utility.masterutil as mutil
import utility.io_util as io
import utility.alt_smotif_util as altutil
# Define MPI message tags
tags = mutil.enum('READY', 'DONE', 'EXIT', 'START')
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
name = MPI.Get_processor_name()
status = MPI.Status()
def killall(processes):
    """
    Kill all the subprocess when requested
    :param processes: total MPI world size (master + workers)
    :return: True once every worker has been sent the EXIT tag
    """
    count = 0
    while True:
        # Drain incoming messages; whenever a worker reports READY,
        # answer with EXIT so it leaves its work loop.
        data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
        source = status.Get_source()
        tag = status.Get_tag()
        if tag == tags.READY:
            comm.send(None, dest=source, tag=tags.EXIT)
            count += 1
        # Stop once every worker (world size minus the master) got EXIT.
        if count == processes -1:
            break
    return True
# ********************* Define cmd line argument parser *********************
parser = argparse.ArgumentParser(description='DINGO-Refine Master MPI process that manages all jobs.')
parser.add_argument('--infile', type=int, help='specify the top_hits file')
parser.add_argument('--stage', type=int, help='specify the stage of the Smotif assembly')
parser.add_argument('--numhits', type=int, help='Top number of hits to be selected')
args = parser.parse_args()
# ********************* Define cmd line argument parser *********************
# Rank '0' specifies the master process
if rank == 0:
# ********************* Extract top hits *********************
in_file = str(args.infile)+"_tophits.gzip"
print "infile ", in_file
try:
tasks = io.readGzipPickle(in_file)
print "len of tasks", len(tasks)
except:
traceback.print_exc()
print "There are no entries in the tophits file, nothing to refine"
killall(size)
exit()
# ********************* Generate and distribute job index array *********************
stime = time.time()
try:
if len(tasks):
pass
except:
print "killing all processes!"
killall(size)
exit()
# print tasks, len(tasks) # this will be the new tasks
task_index = 0 # control the number of processes with this index number
finished_task = 0
num_workers = size - 1 # 1 processor is reserved for master.
closed_workers = 0 # control the workers with no more work that can be assigned
print ("Master starting with {} workers".format(num_workers))
total_data = []
for entry in tasks:
total_data.append(entry)
try:
lowest_noe_energy = altutil.get_lowest_noe_energy(tasks)
except ZeroDivisionError:
killall(size)
exit()
print "Average lowest NOE energy is :", lowest_noe_energy
total_jobs, alt_sse_profile = altutil.compute_jobs(tasks)
while closed_workers < num_workers:
# Manage/distribute all processes in this while loop
data = comm.recv(source=MPI.ANY_SOURCE, tag=MPI.ANY_TAG, status=status)
source = status.Get_source()
tag = status.Get_tag()
if tag == tags.READY:
# worker process is ready, send some task to do.
if task_index < len(total_jobs):
t_job = total_jobs[task_index]
send_job = [tasks[t_job[0]], alt_sse_profile[t_job[1]], args.stage, task_index, lowest_noe_energy]
comm.send(send_job, dest=source, tag=tags.START)
task_index += 1 # increment its
else:
# everything is done, send exit signal
comm.send(None, dest=source, tag=tags.EXIT)
elif tag == tags.DONE:
# take the result from the worker
if data:
for hit in data:
total_data.append(hit)
ctime = time.time()
elapsed = ctime - stime
finished_task += 1
print "Finishing..", finished_task, "of", len(total_jobs), "Smotifs, Elapsed", round((elapsed) / (60), 2), "mins"
elif tag == tags.EXIT:
closed_workers += 1
# consolidate top_hits and dump files here
print "Total number of hits found are : ",len(total_data)
# ranked_data = rank_assembly(total_data, args.numhits)
"""
ranked_data = rank_assembly_with_clustering(total_data, args.numhits)
print len(ranked_data)
io.dumpGzipPickle(str(args.infile) + "_refined_tophits.gzip", ranked_data)
"""
print "All Done, Master exiting"
exit()
# On the worker processes
else:
while True: # initiate infinite loop
comm.send(None, dest=0, tag=tags.READY)
# Signal the master process that you are READY
work = comm.recv(source=0, tag=MPI.ANY_SOURCE, status=status)
tag = status.Get_tag()
if tag == tags.START:
result = alt_search.altSmotifSearch(work)
comm.send(result, dest=0, tag=tags.DONE)
elif tag == tags.EXIT:
# break the infinite loop because there is no more work that can be assigned
break
# Signal EXIT to the master process
comm.send(None, dest=0, tag=tags.EXIT) |
# This sample tests assignment expressions used within
# arguments
import collections
class NearestKeyDict(collections.UserDict):
    # NOTE: this is a type-checker regression sample; the second statement
    # below is *meant* to be flagged, so the code must stay exactly as-is.
    def _keytransform(self, key):
        a = len(candidate_keys := [k for k in sorted(self.data) if k >= key])
        # This should generate an error because walrus operators
        # are not allowed with named arguments.
        b = list(iterable = candidate_keys := [k for k in sorted(self.data) if k >= key])
|
from flask_restful import Resource, Api, reqparse, abort
from flask import Response
from Logger.Control import global_control
import datetime, time, json, requests, redis
#
# SuperClass.
# ----------------------------------------------------------------------------
class Log_Control(object):
    """REST-facing controller for the central logging service.

    Wraps the shared ``global_control`` logger: reads entries, clears the
    log (guarded by a fixed key) and publishes new entries on the
    ``central_logger`` Redis channel. Every method returns the controller's
    ``do_response`` envelope.
    """

    # Shared Logger.Control instance (bound in __init__).
    __controller = None
    # Redis connection settings used by update_log's publish.
    __redis = {'host':'localhost', 'port':6379, 'db':0}

    def __init__(self):
        self.__controller = global_control

    def get_log_by_sender(self, sender=None):
        """Return logged entries (optionally filtered by sender) in a response envelope.

        :param sender: sender name to filter on; None returns everything
        """
        success = 'success'
        status = '200'
        message = 'Logging Service, update log.'
        log_returned = []
        log_contents = self.__controller.get_log(sender)
        if not log_contents in ([], None, ''):
            # Raw entries are tuples ordered (sender, timestamp, log-type, message).
            for log in log_contents:
                log_returned.append(
                    {
                        "sender":log[0],
                        "log-type":log[2],
                        "message":log[3],
                        "timestamp":log[1]
                    }
                )
        data = {"log":log_returned}
        return self.__controller.do_response(message=message,
                                             data=data,
                                             status=status,
                                             response=success)

    def delete_log(self, json_string=None):
        """Clear the whole log when the request carries the correct control key.

        :param json_string: JSON body expected to contain {"key": <control key>}
        :raises Exception: unexpected failures are logged and re-raised
        """
        success = 'success'
        status = '200'
        message = 'Logging Service, clear log.'
        data = None
        try:
            if json_string == None\
            or json_string == '':
                raise KeyError('No JSON Data passed')
            json_data = json.loads(json_string)
            key = json_data['key']
            # Hard-coded control key guarding the destructive operation.
            if not key == '1234-5678-9012-3456':
                raise ValueError('Logging control key incorrect.')
            self.__controller.delete_log()
            data = {'log':[]}
        except KeyError as ke:
            success = 'error'
            status = '400'
            message = 'Badly formed request! Missing {0}'.format(str(ke))
            self.__controller.log('INTERNAL',
                                  'unexpected',
                                  message=message,
                                  timestamp=str(datetime.datetime.now())
                                  )
        except ValueError as ve:
            message = str(ve)
            # NOTE(review): status is an int here (and 500 below) while the
            # other branches use strings ('200'/'400'), and success is
            # capitalised 'Error' — confirm do_response tolerates both forms.
            status = 403
            success = 'Error'
            self.__controller.log('INTERNAL',
                                  'unexpected',
                                  message=message,
                                  timestamp=str(datetime.datetime.now())
                                  )
        except Exception as e:
            message = repr(e)
            status = 500
            success = 'error'
            self.__controller.log('INTERNAL',
                                  'unexpected',
                                  message=message,
                                  timestamp=str(datetime.datetime.now())
                                  )
            # Re-raise: unexpected failures propagate after being logged.
            raise
        return_value = self.__controller.do_response(message=message,
                                                     data=data,
                                                     status=status,
                                                     response=success)
        return return_value

    def update_log(self, json_string=None):
        """Validate a log-entry request and publish it on the Redis channel.

        Expected JSON keys: ``sender``, ``log-type``, optional ``message``.
        The entry is serialised as 'sender<<*>>type<<*>>message<<*>>timestamp'.
        """
        success = 'success'
        status = '200'
        message = 'Logging Service, update log.'
        data = None
        try:
            if json_string == None\
            or json_string == '':
                raise KeyError('Badly formed request!')
            json_data = json.loads(json_string)
            try:
                sender = json_data['sender']
                log_type = json_data['log-type']
            except:
                raise
            try:
                text = json_data['message']
                if text in ('', None):
                    raise KeyError('set timestamp')
            except KeyError as ke:
                # Missing/empty message: record a timestamp-only entry.
                text = 'No message; timestamp recorded.'
            if sender in (None, '') \
            or log_type in (None, ''):
                raise ValueError(
                    'Value errors: sender and log-type cannot be null'
                )
            now = str(datetime.datetime.now())
            # Publish on the shared channel; the logger process consumes it.
            redis_instance = redis.StrictRedis(**self.__redis)
            redis_instance.publish(
                'central_logger',
                '{0}<<*>>{1}<<*>>{2}<<*>>{3}'.format(
                    sender,
                    log_type,
                    text,
                    now
                )
            )
            data = {
                "sender":sender,
                "log-type":log_type,
                "message":text,
                "timestamp":now
            }
            print('Logging data: {0}'.format(data))
        except Exception as e:
            success = 'error'
            status = '400'
            message = repr(e)
            self.__controller.log(
                'LOGGER',
                'INTERNAL ERROR',
                repr(e),
                str(datetime.datetime.now())
            )
        return self.__controller.do_response(message=message,
                                             data=data,
                                             status=status,
                                             response=success)
# Module-level singleton used by the Flask REST resources.
global_log_control = Log_Control()
|
# Your company delivers breakfast via autonomous quadcopter drones. And something
# mysterious has happened.
# Each breakfast delivery is assigned a unique ID, a positive integer. When one of
# the company's 100 drones takes off with a delivery, the delivery's ID is added
# to a list, delivery_id_confirmations. When the drone comes back and lands, the ID
# is again added to the same list.
# After breakfast this morning there were only 99 drones on the tarmac. One of
# the drones never made it back from a delivery. We suspect a secret agent from
# Amazon placed an order and stole one of our patented drones. To track them down,
# we need to find their delivery ID.
# Given the list of IDs, which contains many duplicate integers and one unique
# integer, find the unique integer.
# The IDs are not guaranteed to be sorted or sequential. Orders aren't always
# fulfilled in the order they were received, and some deliveries get cancelled before takeoff.
# input=[1,2,3,4,2,3,4] #1
def find_unique_id(delivery_id_confirmations):
    """Return the delivery ID that appears exactly once (None if there is none)."""
    from collections import Counter
    tally = Counter(delivery_id_confirmations)
    return next((delivery_id for delivery_id, seen in tally.items() if seen == 1), None)
# print(find_unique_id([1,2,3,4,2,3,4])) #1
# Runtime: O(n) time and space
def find_unique_id2(delivery_id_confirmations):
    """Return the delivery ID that appears exactly once, by cancelling pairs.

    Each ID is added to ``seen`` on first sight and removed on second, so
    the single unmatched ID is the only element left in the set.
    Returns None if every ID is paired. Runtime: O(n) time, O(n) space.
    """
    seen = set()
    for value in delivery_id_confirmations:
        if value in seen:
            seen.remove(value)
        else:
            seen.add(value)
    # BUGFIX: removed a stray debug `print(seen)` that leaked to stdout.
    if seen:
        return next(iter(seen))  # the one element remaining in the set
# print(find_unique_id2([1,2,3,4,2,3,4])) #1
# Runtime: O(n) time and O(m) space
def find_unique_id_bitwise(delivery_ids):
    """XOR all IDs together; paired IDs cancel out, leaving the unique one.

    Runtime: O(n) time, O(1) space.
    """
    accumulator = 0
    for current_id in delivery_ids:
        accumulator = accumulator ^ current_id
    return accumulator

print(find_unique_id_bitwise([1,2,3,4,2,3,4])) #1
|
# encoding:utf-8
import os
import shutil
import struct
import binascii
import pdb
class Baidu(object):
    """Convert a Baidu/Sogou .scel dictionary file into a plain-text word list.

    The .scel payload is big-endian UTF-16; ``be2le`` byte-swaps it into a
    ``<origin>.le`` intermediate file, and ``le2txt`` then decodes the word
    table into the output txt file.
    """

    def __init__(self, originfile, txt_file):
        self.originfile = originfile
        self.lefile = originfile + '.le'   # intermediate little-endian file
        self.txtfile = txt_file
        self.buf = [b'0' for x in range(0, 2)]  # 2-byte swap buffer
        self.listwords = []                     # characters of the word being built

    # Byte stream: big-endian -> little-endian
    def be2le(self):
        """Byte-swap the origin file pairwise into ``self.lefile``."""
        with open(self.originfile, 'rb') as of, open(self.lefile, 'wb') as lef:
            contents = of.read()
            contents_size = len(contents)
            mo_size = (contents_size % 2)
            # Pad to an even byte count so the stream splits into 2-byte units.
            if mo_size > 0:
                contents_size += (2 - mo_size)
                # BUGFIX: the original did `contents += contents + b'0000'`,
                # which appended the ENTIRE payload again; only the padding
                # byte(s) are needed.
                contents += b'0' * (2 - mo_size)
            # Swap each byte pair and write it out.
            for i in range(0, contents_size, 2):
                self.buf[1] = contents[i]
                self.buf[0] = contents[i + 1]
                le_bytes = struct.pack('2B', self.buf[0], self.buf[1])
                lef.write(le_bytes)

    def le2txt(self):
        """Decode the byte-swapped stream and append extracted words to the txt file."""
        with open(self.lefile, 'rb') as lef, open(self.txtfile, 'w') as txtf:
            # Read the swapped stream as a hex string; the word table starts
            # at a fixed offset (0x350 for Baidu, 0x2628 for Sogou files).
            # le_bytes = lef.read().hex()[0x350:]  # baidu
            le_bytes = lef.read().hex()[0x2628:]  # sougou
            i = 0
            while i < len(le_bytes):
                result = le_bytes[i:i + 4]
                i += 4
                # Decode each 2-byte unit into a character (hanzi, pinyin or symbol).
                content = binascii.a2b_hex(result).decode('utf-16-be')
                # CJK range check: accumulate hanzi into the current word.
                if '\u4e00' <= content <= '\u9fff':
                    self.listwords.append(content)
                else:
                    # A non-hanzi character terminates the current word;
                    # only words longer than one character are kept.
                    if self.listwords:
                        word = ''.join(self.listwords)
                        if len(word) > 1:
                            txtf.write('{}\n'.format(word))
                        self.listwords = []
def remove_repeat(out_path, ok_dict):
    """Merge every txt file under ``out_path`` into ``ok_dict``, de-duplicated.

    Each unique word is written once, in first-seen order, as
    "<word> 201" (201 is the label value expected downstream).

    :param out_path: directory containing the per-dictionary txt files
    :param ok_dict: path of the merged output file
    """
    # Use a set for O(1) membership instead of a dict with dummy values.
    seen = set()
    with open(ok_dict, 'w') as w:
        for txt in os.listdir(out_path):
            txt_path = os.path.join(out_path, txt)
            with open(txt_path, 'r') as f:
                # Iterate the file lazily instead of readlines().
                for line in f:
                    name = line.strip('\n')
                    if name not in seen:
                        seen.add(name)
                        w.write('{} {}\n'.format(name, 201))
def main():
    """Convert every .scel file under ./scel into a txt word list, then
    merge all lists (de-duplicated) into ./ok.txt."""
    scel_path = "./scel/"
    out_path = "./txt"
    ok_dict = './ok.txt'
    # Start each run from an empty output directory.
    if os.path.isdir(out_path):
        shutil.rmtree(out_path)
    os.mkdir(out_path)
    for f_name in os.listdir(scel_path):
        scel_file = os.path.join(scel_path, f_name)
        txt_file = os.path.join(out_path, f_name[:-4] + 'txt')
        print(txt_file)
        converter = Baidu(scel_file, txt_file)
        converter.be2le()
        converter.le2txt()
        # Drop the little-endian intermediate file.
        leftover = scel_file + '.le'
        if os.path.isfile(leftover):
            os.remove(leftover)
    remove_repeat(out_path, ok_dict)


if __name__ == '__main__':
    main()
|
from networkn import NdexGraph
def create_two_egfr():
    """Build a network with two distinct 'EGFR' nodes (linked via X1-Y1)
    and upload it to the NDEx test server."""
    graph = NdexGraph()
    egfr_a = graph.add_named_node('EGFR')
    x1 = graph.add_named_node('X1')
    x2 = graph.add_named_node('X2')
    graph.add_edge_between(egfr_a, x1)
    graph.add_edge_between(egfr_a, x2)
    egfr_b = graph.add_named_node('EGFR')
    y1 = graph.add_named_node('Y1')
    y2 = graph.add_named_node('Y2')
    graph.add_edge_between(egfr_b, y1)
    graph.add_edge_between(egfr_b, y2)
    graph.add_edge_between(x1, y1)
    graph.set_name('Two Symbol: EGFR')
    graph.upload_to('http://test.ndexbio.org', 'alignment', 'alignment')
def create_egfr():
    """Build a single-'EGFR' star network and upload it to the NDEx test server."""
    graph = NdexGraph()
    egfr = graph.add_named_node('EGFR')
    x1 = graph.add_named_node('X1')
    x2 = graph.add_named_node('X2')
    graph.add_edge_between(egfr, x1)
    graph.add_edge_between(egfr, x2)
    graph.set_name('Symbol: EGFR')
    graph.upload_to('http://test.ndexbio.org', 'alignment', 'alignment')


create_two_egfr()
#!/usr/bin/env python
# RADIOLOGY ---------------------------------------------------
# This is an example script to upload images to Google Storage
# and MetaData to BigQuery. Data MUST be de-identified
import os
# Start google storage client for pmc-stanford
from som.api.google.bigquery import BigQueryClient as Client
client = Client(bucket_name='radiology-test',
                project='som-langlotz-lab')
# Create/get BigQuery dataset
dataset = client.get_or_create_dataset('testing')
# Let's use the default dicom_schema
from som.api.google.bigquery.schema import dicom_schema
table = client.get_or_create_table(dataset=dataset,
                                   table_name='dicomCookies',
                                   schema=dicom_schema)
# Let's load some dummy data from deid
from deid.data import get_dataset
from deid.dicom import get_files
dicom_files = get_files(get_dataset('dicom-cookies'))
# Now de-identify to get clean files
from deid.dicom import get_identifiers, replace_identifiers
metadata = get_identifiers(dicom_files)
# NOTE(review): updated_files (the de-identified copies) is never used
# below — the upload at the bottom sends the ORIGINAL dicom_files. Confirm
# that is intended, since the header requires data to be de-identified.
updated_files = replace_identifiers(dicom_files=dicom_files,
                                    ids=metadata)
# Define some metadata for each entity and item
updates = { "item_id" : "cookieTumorDatabase",
            "entity_id":"cookie-47",
            "Modality": "cookie"}
# Attach the shared entity/item metadata to every file's identifier record.
for image_file in dicom_files:
    if image_file in metadata:
        metadata[image_file].update(updates)
    else:
        metadata[image_file] = updates
# Upload the dataset
client.upload_dataset(items=dicom_files,
                      table=table,
                      mimetype="application/dicom",
                      entity_key="entity_id",
                      item_key="item_id",
                      metadata=metadata)
from time import time
import warnings
import os
import subprocess
from DataHelper import ConfigManager
import cv2
import numpy as np
from math import sqrt
import tensorflow as tf
from scipy import interpolate
def getTFsess():
    """Return a TF1 Session whose GPU memory allocation grows on demand
    (allow_growth=True) instead of reserving all GPU memory up front."""
    return tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(allow_growth=True)))
""" Texture Generation """
#
# def edge_detection(img, th=10):
# """
# detect the edge
# :param img: source image
# :param th: threshold to decide what is black
# :return: grad, edge_along_y, edge_along_x
# """
# src = img.copy()
# gray = cv2.cvtColor(src, cv2.COLOR_BGR2GRAY)
# grad_x = cv2.convertScaleAbs(
# cv2.Sobel(gray, cv2.CV_16S, 1, 0, ksize=1, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
# grad_y = cv2.convertScaleAbs(
# cv2.Sobel(gray, cv2.CV_16S, 0, 1, ksize=1, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
# grad = cv2.addWeighted(grad_x, 0.5, grad_y, 0.5, 0)
# edge_along_y = np.zeros((img.shape[0], 2), dtype=np.uint16)
# edge_along_x = np.zeros((img.shape[1], 2), dtype=np.uint16)
# for _y in range(edge_along_y.shape[0]):
# temp = np.argwhere(grad_x[_y, :] > th)
# if np.any(temp):
# edge_along_y[_y, 0] = np.min(temp) + 1
# edge_along_y[_y, 1] = np.max(temp) - 1
# for _x in range(edge_along_x.shape[0]):
# temp = np.argwhere(grad_x[:, _x] > th)
# if np.any(temp):
# edge_along_x[_x, 0] = np.min(temp) + 1
# edge_along_x[_x, 1] = np.max(temp) - 1
# return grad, edge_along_y, edge_along_x
#
#
# def color_grad_2_pts_x(img, x0, x1, y, left_color, right_color):
# length = x1 - x0
# img[y, x0:x1, 0] = np.fromfunction(
# lambda x: left_color[0] * (1 - x / length) + right_color[0] * (x / length), (length,))
# img[y, x0:x1, 1] = np.fromfunction(
# lambda x: left_color[1] * (1 - x / length) + right_color[1] * (x / length), (length,))
# img[y, x0:x1, 2] = np.fromfunction(
# lambda x: left_color[2] * (1 - x / length) + right_color[2] * (x / length), (length,))
# return img
#
#
# def color_grad_2_pts_y(img, y0, y1, x, left_color, right_color):
# length = y1 - y0
# img[y0:y1, x, 0] = np.fromfunction(
# lambda _x: left_color[0] * (1 - _x / length) + right_color[0] * (_x / length), (length,))
# img[y0:y1, x, 1] = np.fromfunction(
# lambda _x: left_color[1] * (1 - _x / length) + right_color[1] * (_x / length), (length,))
# img[y0:y1, x, 2] = np.fromfunction(
# lambda _x: left_color[2] * (1 - _x / length) + right_color[2] * (_x / length), (length,))
# return img
#
#
# def image_expansion_v2(img, internal=False):
# img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.float64)
# if internal:
# img[:, :, 2] *= 1.2
# img[:, :, 1] *= 0.8
# avg_color = img[img.sum(-1) > 0].mean(0)
# maskHSV = cv2.inRange(img, avg_color - np.array([10, 40, 25]), avg_color + np.array([10, 100, 50]))
# for i in range(maskHSV.shape[0]):
# t = maskHSV[i].nonzero()[0].flatten()
# if t.size > 1:
# maskHSV[i, t[0]:t[-1]] = 255
# resultHSV = cv2.bitwise_and(img, img, mask=maskHSV)
#
# new_img_x = resultHSV.copy().astype(np.float32)
# img = resultHSV
# for r in range(img.shape[0]):
# t = np.argwhere(img[r].sum(-1) > 0).flatten()
# if t.size > 0:
# left_edge = np.min(t) + 5
# right_edge = np.max(t) - 5
#
# while img[r, left_edge].sum(-1) <= 5:
# left_edge -= 1
# while img[r, right_edge].sum(-1) <= 5:
# right_edge += 1
# # left edge
# new_img_x = color_grad_2_pts_x(new_img_x, x0=0, x1=left_edge, y=r,
# left_color=img[r, left_edge] * 0.5 + avg_color * 0.5,
# right_color=img[r, left_edge])
#
# # right edge
# new_img_x = color_grad_2_pts_x(new_img_x, x0=right_edge, x1=img.shape[1], y=r,
# left_color=img[r, right_edge, :],
# right_color=img[r, right_edge, :] * 0.5 + avg_color * 0.5)
#
# # internal
# if internal:
# left_edge = np.min(t) - 5
# right_edge = np.max(t) + 5
# while img[r, left_edge].sum(-1) <= 5:
# left_edge += 1
# while img[r, right_edge].sum(-1) <= 5:
# right_edge -= 1
# new_img_x = color_grad_2_pts_x(new_img_x, x0=left_edge, x1=right_edge, y=r,
# left_color=new_img_x[r, left_edge],
# right_color=new_img_x[r, right_edge])
#
# new_img_y = new_img_x.copy().astype(np.float32)
# for c in range(new_img_y.shape[1]):
# t = np.argwhere(new_img_y[:, c, :].sum(-1) > 0).flatten()
# if t.size > 0:
# left_edge = np.min(t) + 5
# right_edge = np.max(t) - 5
# while new_img_y[left_edge, c].sum(-1) <= 5:
# left_edge -= 1
# while new_img_y[right_edge, c].sum(-1) <= 5:
# right_edge += 1
# # left edge
# new_img_y = color_grad_2_pts_y(new_img_y, y0=0, y1=left_edge, x=c,
# left_color=new_img_y[left_edge, c] * 0.5 + avg_color * 0.5,
# right_color=new_img_y[left_edge, c])
# new_img_x = color_grad_2_pts_y(new_img_x, y0=0, y1=left_edge, x=c,
# left_color=new_img_y[left_edge, c] * 0.5 + avg_color * 0.5,
# right_color=new_img_y[left_edge, c])
#
# # right edge
# new_img_y = color_grad_2_pts_y(new_img_y, y0=right_edge, y1=img.shape[0], x=c,
# left_color=new_img_y[right_edge, c, :],
# right_color=new_img_y[right_edge, c, :] * 0.5 + avg_color * 0.5)
# new_img_x = color_grad_2_pts_y(new_img_x, y0=right_edge, y1=img.shape[0], x=c,
# left_color=new_img_y[right_edge, c, :],
# right_color=new_img_y[right_edge, c, :] * 0.5 + avg_color * 0.5)
# if internal:
# left_edge = np.min(t) - 5
# right_edge = np.max(t) + 5
# while new_img_y[left_edge, c].sum(-1) <= 5:
# left_edge += 1
# while new_img_y[right_edge, c].sum(-1) <= 5:
# right_edge -= 1
# new_img_y = color_grad_2_pts_y(new_img_y, y0=left_edge, y1=right_edge, x=c,
# left_color=new_img_y[left_edge, c, :],
# right_color=new_img_y[right_edge, c, :] * 0.5 + avg_color * 0.5)
# img_recover = cv2.addWeighted(new_img_x, 0.5, new_img_y, 0.5, 0)
# img_recover = img_recover.round().clip(0, 255).astype(np.uint8)
# img_recover = cv2.cvtColor(img_recover, cv2.COLOR_HSV2BGR)
# return img_recover
# def image_expansion(img, internal=False):
# # new_img = np.zeros_like(img, dtype=np.float32)
# new_img = img.copy().astype(np.float32)
# _, edges_along_y, edges_along_x = edge_detection(img, th=5)
# color = np.mean(img[np.argwhere(edges_along_y[:, 0] > 0), img.shape[1] // 2, :], axis=0)
# for _y, (_x0, _x1) in enumerate(edges_along_y):
# if _x0 != 0 and _x1 != img.shape[1]:
# # appends edges
# new_img[_y, -1, :] = new_img[_y, 0, :] = color
# # left edge
# length = _x0
# left_color = new_img[_y, 0, :]
# right_color = new_img[_y, _x0, :]
# new_img[_y, :_x0, 0] = np.fromfunction(
# lambda x: left_color[0] * (1 - x / length) + right_color[0] * (x / length), (length,))
# new_img[_y, :_x0, 1] = np.fromfunction(
# lambda x: left_color[1] * (1 - x / length) + right_color[1] * (x / length), (length,))
# new_img[_y, :_x0, 2] = np.fromfunction(
# lambda x: left_color[2] * (1 - x / length) + right_color[2] * (x / length), (length,))
# # right edge
# length = img.shape[1] - _x1
# left_color = new_img[_y, _x1, :]
# right_color = new_img[_y, -1, :]
# new_img[_y, _x1:, 0] = np.fromfunction(
# lambda x: left_color[0] * (1 - x / length) + right_color[0] * (x / length), (length,))
# new_img[_y, _x1:, 1] = np.fromfunction(
# lambda x: left_color[1] * (1 - x / length) + right_color[1] * (x / length), (length,))
# new_img[_y, _x1:, 2] = np.fromfunction(
# lambda x: left_color[2] * (1 - x / length) + right_color[2] * (x / length), (length,))
# # internal
# if internal:
# length = _x1 - _x0
# left_color = new_img[_y, _x0, :]
# right_color = new_img[_y, _x1, :]
# new_img[_y, _x0:_x1, 0] = np.fromfunction(
# lambda x: left_color[0] * (1 - x / length) + right_color[0] * (x / length), (length,))
# new_img[_y, _x0:_x1, 1] = np.fromfunction(
# lambda x: left_color[1] * (1 - x / length) + right_color[1] * (x / length), (length,))
# new_img[_y, _x0:_x1, 2] = np.fromfunction(
# lambda x: left_color[2] * (1 - x / length) + right_color[2] * (x / length), (length,))
# # end of x padding
# # _, _, edges_along_x = edge_detection(new_img, th=5)
# color = np.mean(img[img.shape[0] // 2, np.argwhere(edges_along_x[:, 0] > 0), :], axis=0)
# # for _x, (_y0, _y1) in enumerate(edges_along_x):
# _y0 = np.argwhere(edges_along_y[:, 0] > 0).min()
# _y1 = np.argwhere(edges_along_y[:, 0] > 0).max()
# for _x in range(img.shape[1]):
# if _y0 != 0 and _y1 != img.shape[0]:
# # appends edges
# new_img[0, _x, :] = new_img[-1, _x, :] = color
# # left edge
# length = _y0
# left_color = new_img[0, _x, :]
# right_color = new_img[_y0, _x, :]
# new_img[:_y0, _x, 0] = np.fromfunction(
# lambda x: left_color[0] * (1 - x / length) + right_color[0] * (x / length), (length,))
# new_img[:_y0, _x, 1] = np.fromfunction(
# lambda x: left_color[1] * (1 - x / length) + right_color[1] * (x / length), (length,))
# new_img[:_y0, _x, 2] = np.fromfunction(
# lambda x: left_color[2] * (1 - x / length) + right_color[2] * (x / length), (length,))
# # right edge
# length = img.shape[0] - _y1
# left_color = new_img[_y1, _x, :]
# right_color = new_img[-1, _x, :]
# new_img[_y1:, _x, 0] = np.fromfunction(
# lambda x: left_color[0] * (1 - x / length) + right_color[0] * (x / length), (length,))
# new_img[_y1:, _x, 1] = np.fromfunction(
# lambda x: left_color[1] * (1 - x / length) + right_color[1] * (x / length), (length,))
# new_img[_y1:, _x, 2] = np.fromfunction(
# lambda x: left_color[2] * (1 - x / length) + right_color[2] * (x / length), (length,))
# # # internal
# # length = _y1 - _y0
# # left_color = new_img[_y0, _x, :]
# # right_color = new_img[_y1, _x, :]
# # new_img[_y0:_y1, _x, 0] = np.fromfunction(
# # lambda x: left_color[0] * (1 - x / length) + right_color[0] * (x / length), (length,))
# # new_img[_y0:_y1, _x, 1] = np.fromfunction(
# # lambda x: left_color[1] * (1 - x / length) + right_color[1] * (x / length), (length,))
# # new_img[_y0:_y1, _x, 2] = np.fromfunction(
# # lambda x: left_color[2] * (1 - x / length) + right_color[2] * (x / length), (length,))
# # end of y padding
# new_img = new_img.round().clip(0, 255).astype(np.uint8)
# return new_img
def process_copy(img_full, img_half):
    """Copy rows of ``img_full`` into ``img_half`` where ``img_half`` has content.

    For each row of ``img_half`` that has any non-zero pixel, the span from
    its leftmost to its rightmost non-zero column (right edge exclusive) is
    overwritten with the corresponding pixels of ``img_full``.

    :param img_full: full texture image, same (H, W, C) shape as img_half
    :param img_half: partial texture; modified in place and returned
    :return: img_half with its per-row spans filled from img_full
    """
    # BUGFIX/cleanup: removed the unused local `k = 4` the original set.
    for row in range(img_full.shape[0]):
        lit = np.argwhere(img_half[row].sum(-1) > 0).flatten()
        if lit.size > 0:
            left_edge = np.min(lit)
            right_edge = np.max(lit)
            # NOTE(review): the rightmost lit column itself is excluded by
            # this slice — confirm the off-by-one is intentional.
            img_half[row, left_edge:right_edge] = img_full[row, left_edge:right_edge]
    return img_half
def genText(img_path_full, img_path_half, output_head_path, output_mask_path, size=None):
    """Compose the full/half face textures, expand them onto a canvas, and
    write the head texture plus the mask-sized crop.

    :param img_path_full: path of the full texture image
    :param img_path_half: path of the half texture image
    :param output_head_path: where the expanded head texture is written
    :param output_mask_path: where the mask-region crop is written
    :param size: optional (H, W, C) canvas size; defaults to the composed image's shape
    """
    assert size is None or (type(size) == tuple and len(size) == 3)
    img_full = cv2.imread(img_path_full)
    img_half = cv2.imread(img_path_half)
    img = process_copy(img_full, img_half)
    if size is None:
        size = img.shape
    new_img = np.zeros(size, dtype=np.uint8)
    # Paste the texture slightly below centre (3/5 down), horizontally centred.
    # NOTE(review): the right bound uses img_full's width while the others use
    # img's — fine when both share a shape, but confirm that assumption.
    internal = (
        (size[0] * 3 // 5 - img.shape[0] // 2), (size[0] * 3 // 5 + img.shape[0] // 2), (size[1] - img.shape[1]) // 2,
        (size[1] + img_full.shape[1]) // 2)
    new_img[internal[0]:internal[1], internal[2]:internal[3], :] = img
    img = image_expansion_execute(new_img)
    cv2.imwrite(output_head_path, img)
    img_mask = img[internal[0]:internal[1], internal[2]:internal[3], :].copy()
    cv2.imwrite(output_mask_path, img_mask)
    return
def image_expansion_execute(img_BGR):
    """Expand the pasted face texture so it fills the whole canvas.

    Converts to HSV, estimates the average texture colour in two passes,
    masks the face region, then extrapolates the edge colours outwards
    row-by-row and column-by-column (scipy interp1d over a ~30px margin,
    followed by a 3-neighbour smoothing sweep) so the black background
    blends into the average colour.

    :param img_BGR: BGR uint8 image with the texture pasted on black
    :return: BGR uint8 image with the background filled in
    """
    img_HSV = cv2.cvtColor(img_BGR, cv2.COLOR_BGR2HSV).astype(np.float64)
    # First pass: mean colour of pixels that are neither near-black nor near-white.
    avg_color = img_HSV[np.logical_and(img_HSV.sum(-1) > 30, img_HSV.sum(-1) < 700)].mean(0)
    maskHSV = cv2.inRange(img_HSV, avg_color - np.array([20, 35, 35], dtype=np.float64),
                          avg_color + np.array([20, 35, 35], dtype=np.float64))
    masked_HSV = cv2.bitwise_and(img_HSV, img_HSV, mask=maskHSV)
    # Second pass: refine the average using only pixels kept by the first mask.
    avg_color = img_HSV[np.logical_and(masked_HSV.sum(-1) > 30, masked_HSV.sum(-1) < 700)].mean(0)
    maskHSV = cv2.inRange(img_HSV, avg_color - np.array([20, 30, 30], dtype=np.float64),
                          avg_color + np.array([20, 30, 30], dtype=np.float64))
    # Close row-wise holes between each row's leftmost and rightmost hits.
    for i in range(maskHSV.shape[0]):
        t = maskHSV[i].nonzero()[0].flatten()
        if t.size > 1:
            maskHSV[i, t[0]:t[-1]] = 255
    masked_HSV = cv2.bitwise_and(img_HSV, img_HSV, mask=maskHSV)
    # set img
    new_img = masked_HSV.copy().astype(np.float32)
    left_edge = np.zeros(masked_HSV.shape[0], dtype=np.uint32)
    right_edge = np.full(masked_HSV.shape[0], img_HSV.shape[1], dtype=np.uint32)
    # Horizontal pass: per row, interpolate from the face-edge colour out to
    # avg_color across a 30-pixel margin, then flood the rest with avg_color.
    for _y in range(masked_HSV.shape[0]):
        t = np.argwhere(masked_HSV[_y].sum(-1) > 0).flatten()
        if t.size > 0:
            k = 4  # number of edge pixels used as interpolation anchors
            left_edge[_y] = np.min(t) + k
            right_edge[_y] = np.max(t) - k
            # left_edge__y_ = left_edge[_y] // 2
            # right_edge__y_ = (new_img.shape[1] + right_edge[_y]) // 2
            left_edge__y_ = max(0, left_edge[_y] - 30)
            right_edge__y_ = min(new_img.shape[1], right_edge[_y] + 30)
            kind = "slinear"
            x_fit = np.concatenate(([left_edge__y_], np.arange(left_edge[_y], left_edge[_y] + k)), 0)
            y_fit = np.concatenate((avg_color.reshape(1, 3), new_img[_y, left_edge[_y]:left_edge[_y] + k, :]), 0)
            fl = interpolate.interp1d(x_fit, y_fit, kind=kind, axis=0, fill_value="extrapolate")
            x_fit = np.concatenate(
                ([right_edge__y_], np.arange(right_edge[_y] - k, right_edge[_y])), 0)
            y_fit = np.concatenate((avg_color.reshape(1, 3), new_img[_y, right_edge[_y] - k: right_edge[_y], :]), 0)
            fr = interpolate.interp1d(x_fit, y_fit, kind=kind, axis=0, fill_value="extrapolate")
            new_img[_y, left_edge__y_:left_edge[_y], :] = fl(np.arange(left_edge__y_, left_edge[_y])).clip(0, 255)
            new_img[_y, right_edge[_y]:right_edge__y_, :] = fr(
                np.arange(right_edge[_y], right_edge__y_)).clip(0, 255)
            new_img[_y, :left_edge__y_, :] = avg_color
            new_img[_y, right_edge__y_:, :] = avg_color
    # Smooth the horizontal margins with a weighted 3-neighbour average.
    for _y in range(new_img.shape[0] - 1):
        for _x in reversed(range(0, left_edge[_y])):
            new_img[_y, _x] = 0.33 * new_img[_y - 1, _x] + 0.34 * new_img[_y, _x + 1] + 0.33 * new_img[
                _y + 1, _x]
        for _x in range(right_edge[_y], new_img.shape[1]):
            new_img[_y, _x] = 0.33 * new_img[_y - 1, _x] + 0.34 * new_img[_y, _x - 1] + 0.33 * new_img[
                _y + 1, _x]
    up_edge = np.zeros(img_HSV.shape[1], dtype=np.uint32)
    down_edge = np.full(img_HSV.shape[1], img_HSV.shape[0], dtype=np.uint32)
    # Vertical pass, mirroring the horizontal extrapolation column by column.
    for _x in range(img_HSV.shape[1]):
        t = np.argwhere(new_img[:, _x, :].sum(-1) > 0).flatten()
        if t.size > 0:
            k = 4
            up_edge[_x] = np.min(t) + k
            down_edge[_x] = np.max(t) - k
            # up_edge__x_ = up_edge[_x] // 2
            # down_edge__x_ = (new_img.shape[0] + down_edge[_x]) // 2
            up_edge__x_ = max(0, up_edge[_x] - 30)
            down_edge__x_ = min(new_img.shape[0], down_edge[_x] + 30)
            k = 1  # NOTE(review): anchor count shrinks to 1 here — confirm intended
            kind = "slinear"
            x_fit = np.concatenate(([up_edge__x_], np.arange(up_edge[_x], up_edge[_x] + k)), 0)
            y_fit = np.concatenate((avg_color.reshape(1, 3), new_img[up_edge[_x]:up_edge[_x] + k, _x, :]), 0)
            fl = interpolate.interp1d(x_fit, y_fit, kind=kind, axis=0, fill_value="extrapolate")
            x_fit = np.concatenate(
                ([down_edge__x_], np.arange(down_edge[_x] - k, down_edge[_x])), 0)
            y_fit = np.concatenate((avg_color.reshape(1, 3), new_img[down_edge[_x] - k: down_edge[_x], _x, :]), 0)
            fr = interpolate.interp1d(x_fit, y_fit, kind=kind, axis=0, fill_value="extrapolate")
            new_img[up_edge__x_:up_edge[_x], _x, :] = fl(np.arange(up_edge__x_, up_edge[_x])).clip(0, 255)
            new_img[down_edge[_x]:down_edge__x_, _x, :] = fr(
                np.arange(down_edge[_x], down_edge__x_)).clip(0, 255)
            new_img[:up_edge__x_, _x, :] = avg_color
            new_img[down_edge__x_:, _x, :] = avg_color
    # Smooth the vertical margins.
    for _x in range(new_img.shape[1] - 1):
        for _y in reversed(range(0, up_edge[_x])):
            new_img[_y, _x] = 0.33 * new_img[_y, _x - 1] + 0.34 * new_img[_y + 1, _x] + 0.33 * new_img[
                _y, _x + 1]
        for _y in range(down_edge[_x], new_img.shape[0]):
            new_img[_y, _x] = 0.33 * new_img[_y, _x - 1] + 0.34 * new_img[_y - 1, _x] + 0.33 * new_img[
                _y, _x + 1]
    out_img = new_img.round().clip(0, 255).astype(np.uint8)
    out_img_BGR = hsv2bgr(out_img)
    # display(np.concatenate((img_BGR, hsv2bgr(masked_HSV), out_img_BGR), axis=1))
    return out_img_BGR
"""Call Blender"""
def blender_wrapper(blender_file, script_file_path, input_data, texture, hair, mask, output, gen_hair, hair_color, background=True):
    """Write the job parameters into config.ini and launch Blender on a scene.

    :param blender_file: .blend scene file to open
    :param script_file_path: Python script Blender executes via -P
    :param input_data: input image file name, passed via the config file
    :param texture: head texture file name, passed via the config file
    :param hair: hair data file name, passed via the config file
    :param mask: mask file name, passed via the config file
    :param output: output file name, passed via the config file
    :param gen_hair: whether the Blender script should generate hair
    :param hair_color: RGB triple; zeroed out when gen_hair is falsy
    :param background: run Blender headless (-b flag) when True
    """
    # LOAD CONFIG FILE
    configManager = ConfigManager('.\\config.ini')
    keyAndValue = {}
    keyAndValue['INPUT_DATA'] = input_data
    keyAndValue['TEXTURE_DATA'] = texture
    keyAndValue['HAIR_DATA'] = hair
    keyAndValue['MASK_DATA'] = mask
    keyAndValue['OUT_DATA'] = output
    keyAndValue['HAIR'] = gen_hair
    keyAndValue['HAIR_COLOR'] = hair_color if gen_hair else [0, 0, 0]
    configManager.addPairs(keyAndValue)
    # SAVE CONFIG FILE
    blender = r".\\Blender\\blender.exe"
    if os.path.exists(blender):
        if background:
            cmd = "{} -b {} -P {}".format(blender, blender_file, script_file_path)
        else:
            cmd = "{} {} -P {}".format(blender, blender_file, script_file_path)
        print("Running cmd: ", cmd)
        try:
            # NOTE(review): passing an argument list together with shell=True
            # is unusual (the list is re-joined by the shell layer) — confirm
            # the intended invocation; shell=False with a list is the safe form.
            return_code = subprocess.call(cmd.split(' '), shell=True)
            if return_code:
                raise Exception("Unknown Error for blender_wrapper")
        except Exception as e:
            print(e)
            print("CMD:")
            print(cmd.split(' '))
    else:
        print("\tBlender not found")
        print("\tMake a Symbolic Link of Blender root folder BY")
        print("\tWindows: System console")
        print("\t\t cd <PROJECT FOLDER>")
        print("\t\t mklink /D <BLNDER FOLDER> Blender")
        print("\n\tLinux/Mac: bash terminal")
        print("\t\t cd <PROJECT FOLDER>")
        print("\t\t sudo ln -s <BLNDER FOLDER> Blender")
    return
"""Utility"""
# def dist(pt1, pt2):
# return sqrt((pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2)
def hsv2bgr(img):
    """Clamp to [0, 255], cast to uint8, and convert the image from HSV to BGR."""
    clamped = img.clip(0, 255).astype(np.uint8)
    return cv2.cvtColor(clamped, cv2.COLOR_HSV2BGR)
def display(img, name="Img", time=0, encode="BGR"):
    """Show an image in an OpenCV window and wait for a key press.

    :param img: image array, or a filesystem path to load with cv2.imread
    :param name: window title
    :param time: cv2.waitKey delay in milliseconds (0 = block until a key)
    :param encode: channel order of the supplied array (e.g. "RGB"); the
        channels are reordered to BGR before display
    :raises TypeError: if img is neither an ndarray nor a str path
    """
    if not isinstance(img, np.ndarray):
        if isinstance(img, str):
            img = cv2.imread(img)
        else:
            raise TypeError("Should be img (numpy.ndarray) or img path (str), but {} found".format(type(img)))
    # BUGFIX: removed the original's second type check that re-ran
    # cv2.imread — it was dead after a successful load and crashed
    # (imread(None-like)) after a failed one.
    if img.ndim == 3:
        img = img[..., [encode.find('B'), encode.find('G'), encode.find('R')]]  # to BGR
    cv2.imshow(name, img)
    cv2.waitKey(time)
    cv2.destroyAllWindows()
def time_it_wrapper(callback, name="", args=(), kwargs=None):
    """Run ``callback(*args, **kwargs)``, printing its name and wall time.

    :param callback: callable to invoke; may be None to only print the label
    :param name: label printed before the run
    :param args: positional arguments for the callback
    :param kwargs: keyword arguments for the callback (None -> {})
    :return: whatever the callback returned (None when callback is falsy)
    """
    # BUGFIX: the original used a mutable default (`kwargs={}`), which is
    # shared across calls; default to None and build a fresh dict instead.
    if kwargs is None:
        kwargs = {}
    print(name, ": ")
    start_time = time()
    temp = None
    if callback:
        temp = callback(*args, **kwargs)
    print("\ttime={:.2f}s".format(time() - start_time))
    return temp
def main(img_path=None, hair_data=None):
    """
    Run the full head-generation pipeline: PRNet mask/geometry, texture
    synthesis, then Blender-based alignment and rendering.

    :param img_path: optional input image name (under DIR_INPUT); overrides
        the INPUT/TEXTURE/MASK/OUT entries stored in config.ini
    :param hair_data: optional hair-strands data file name; overrides HAIR_DATA
    :return: None
    """
    global_start = time()
    """Import constants from config file"""
    configManager = ConfigManager('.\\config.ini')
    json_data = configManager.getAll()
    DIR_INPUT = json_data["DIR_INPUT"]
    DIR_TEXTURE = json_data["DIR_TEXTURE"]
    DIR_MASK = json_data["DIR_MASK"]
    DIR_OUT = json_data["DIR_OUT"]
    INPUT_DATA = json_data["INPUT_DATA"]
    TEXTURE_DATA = json_data["TEXTURE_DATA"]
    HAIR_DATA = json_data["HAIR_DATA"]
    MASK_DATA = json_data["MASK_DATA"]
    OUT_DATA = json_data["OUT_DATA"]
    HAIR = json_data["HAIR"]
    HAIR_COLOR = json_data.get('HAIR_COLOR', [0, 0, 0])
    BLENDER_BACKGROUND = json_data["BLENDER_BACKGROUND"]
    if img_path is not None:
        # Derive the per-image file names from the input name and persist
        # them so the Blender script reads the same values from config.ini.
        INPUT_DATA = json_data['INPUT_DATA'] = img_path
        TEXTURE_DATA = json_data["TEXTURE_DATA"] = img_path
        MASK_DATA = json_data["MASK_DATA"] = "{}.obj".format(img_path[:-4])
        OUT_DATA = json_data["OUT_DATA"] = "{}.obj".format(img_path[:-4])
        configManager.addPairs(json_data)
        assert os.path.exists(os.path.join(DIR_INPUT, img_path))
    if hair_data is not None:
        HAIR_DATA = json_data["HAIR_DATA"] = hair_data
        configManager.addPairs(json_data)
    """Setup"""
    warnings.filterwarnings("ignore")
    print("Importing packages: ")
    start_time = time()
    # Deferred import: PRNet pulls in heavy TensorFlow state, so its load
    # time is measured explicitly here.
    from PRNet.myPRNET import genPRMask
    print("\ttime={:.2f}s".format(time() - start_time))
    """END"""
    """Geometry"""
    time_it_wrapper(None, "Generating Geometry")
    """Mask"""
    time_it_wrapper(genPRMask, "Generating Mask", args=(
        os.path.join(DIR_INPUT, INPUT_DATA),
        DIR_MASK),
                    kwargs={'isMask': False})
    """Texture"""
    time_it_wrapper(genText, "Generating Texture", args=(
        os.path.join(DIR_MASK, "{}_texture.png".format(MASK_DATA[:-4])),  # input full
        os.path.join(DIR_MASK, "{}_texture_2.png".format(MASK_DATA[:-4])),  # input half
        os.path.join(DIR_TEXTURE, TEXTURE_DATA),  # output texture for head
        os.path.join(DIR_MASK, "{}_texture.png".format(MASK_DATA[:-4])),  # output texture for mask
        (512, 512, 3)
    ))
    """Alignment"""
    # NOTE(review): gen_hair is passed as literal True and background as
    # False — the HAIR/BLENDER_BACKGROUND config values read above are
    # unused; confirm that is intentional.
    time_it_wrapper(blender_wrapper, "Alignment", args=(
        ".\\new_geometry.blend",
        ".\\blender_script\\geo.py",
        INPUT_DATA,
        TEXTURE_DATA,
        HAIR_DATA,
        MASK_DATA,
        OUT_DATA,
        True,
        HAIR_COLOR,
        False))
    print("Output to: {}".format(os.path.join(os.getcwd(), DIR_OUT, OUT_DATA)))
    print("Total_time: {:.2f}".format(time() - global_start))
    return
def test_texture(img_path_full, img_path_half):
    """Debug helper: rebuild the expanded texture from a full and a half
    texture image.

    NOTE(review): the computed `img_mask` is never returned or displayed, so
    this only exercises the pipeline for side effects — looks unfinished.
    """
    img_full = cv2.imread(img_path_full)
    img_half = cv2.imread(img_path_half)
    img = process_copy(img_full, img_half)
    size = (512, 512, 3)
    new_img = np.zeros(size, dtype=np.uint8)
    # Paste `img` slightly below the center of a 512x512 canvas.
    # NOTE(review): the right column bound uses img_full.shape while the left
    # uses img.shape — these disagree if process_copy changes the width;
    # confirm this is intended.
    internal = (
        (size[0] * 3 // 5 - img.shape[0] // 2), (size[0] * 3 // 5 + img.shape[0] // 2), (size[1] - img.shape[1]) // 2,
        (size[1] + img_full.shape[1]) // 2)
    new_img[internal[0]:internal[1], internal[2]:internal[3], :] = img
    img = image_expansion_execute(new_img)
    img_mask = img[internal[0]:internal[1], internal[2]:internal[3], :].copy()
if __name__ == '__main__':
    # Run the pipeline on the sample image, then the texture debug helper.
    main('orbo.jpg', "strands00357.data")
    test_texture("Data/mask/orbo_texture_3.png", "Data/mask/orbo_texture_2.png")
|
#numbers=["3","34","64"]
"""
for i in range(len(numbers)):
numbers[i]=int(numbers[i])
numbers[2]=numbers[2]+1
print(numbers[2])
"""
#it is very lengthy
#so here we use map , for loop ki jgh pr
"""numbers=list(map(int,numbers))
numbers[2]=numbers[2]+1
def sq(a):
return a*a
num=[2,3,4,5,6,7]
square=list(map(sq,num))
square=list(map(lambda x: x*x , num))
print(square)
"""
"""
#---------------------------------MAP------------------------------------
def square(a):
return a*a
def cube(a):
return a*a*a
func=[square,cube]
for i in range(5):
val=list(map(lambda x:x(i),func))
print(val)
"""
"""
#----------------------------------------FILTER-----------------------------
list1=[1,2,3,4,5,6,8,9,55,66,42,25]
def is_greater_5(num):
return num>5
gr_than_5=list(filter(is_greater_5,list1)) #filter function aisi list bnata h elements ki jo true return krta h
print(gr_than_5)
"""
#-----------------------------REDUCE-----------------------------------------
from functools import reduce  # in Python 3, reduce lives in functools
list3 = [1, 2, 3, 4]
# Fold the list down to a single value: first a running sum, then a product.
num = reduce(lambda acc, item: acc + item, list3)
num1 = reduce(lambda acc, item: acc * item, list3)
print(num)
print(num1)
|
# **************************************************************************
# Author: João V. Tristão
# Date: 16/12/2019
# Problem: Digit factorial chains
# Approach:
# - Brute force
#
# **************************************************************************
import numpy as np
import math as mt
def fact_sum(n):
    """Return the sum of the factorials of the decimal digits of n."""
    return sum(mt.factorial(int(digit)) for digit in str(n))
def get_sequence(cur):
    """Return the digit-factorial chain starting at `cur`, stopping just
    before the first repeated term."""
    chain = []
    while cur not in chain:
        chain.append(cur)
        cur = fact_sum(cur)
    return chain
# Project Euler 74: count chains by their non-repeating length over 0..n-1.
n = 1000000
seq_len = {}
print("Processing(Takes 1 min)....")
for i in range(n):
    size = len(get_sequence(i))
    seq_len[size] = seq_len.get(size, 0) + 1
print("Number of chains :", seq_len[60])
try:
    # Django <= 1.5 location of patterns()/url().
    from django.conf.urls.defaults import patterns, url
except ImportError:
    from django.conf.urls import patterns, url
# URLconf for the notification app. Views are referenced by dotted string
# via patterns(); NOTE(review): both were removed in Django 1.10, so this
# module only works on old Django — confirm the pinned version.
urlpatterns = patterns("notification.views",
    url(r"^settings/$", 'notice_settings', name="notification_notice_settings"),
    url(r"^mark_seen/(?P<notice_id>\d+)/$", 'mark_seen', name="notification_mark_seen"),
    url(r"^mark_all_seen/$", 'mark_all_seen', name="notification_mark_all_seen"),
)
|
#TRON by Taylor Poulos
#AndrewID: tpoulos
#email: poulos.taylor.w@gmail.com
#Created in Nov-Dec 2012
#15-112 Term Project
#These functions create the cycles on the board
####################
#IMPORTS
####################
import pygame
from pygame.locals import *
import config
import random
####################
#GameWindow Class
####################
#A GameWindow is the basic PyGame window the game sits in
class GameWindow(object):
    """The top-level PyGame window the whole game renders into."""
    def __init__(self):
        self.mainClock = pygame.time.Clock()
        self.display = pygame.display.set_mode(config.SCREEN_SIZE)
        pygame.display.set_caption('Tron')
        pygame.mouse.set_visible(False)
        if(config.FULLSCREEN == True):
            #Makes the game fullscreen
            pygame.display.set_mode((config.WINDOW_WIDTH,
                                    config.WINDOW_HEIGHT),
                                    pygame.DOUBLEBUF | pygame.FULLSCREEN)
        self.initSounds()
    def initSounds(self):
        """Create the background surface and load the menu sound effects."""
        self.background = pygame.Surface(config.SCREEN_SIZE)
        self.mixer = pygame.mixer
        self.upSound = pygame.mixer.Sound("sounds/menu_up.ogg")
        self.upSound.set_volume(0.3)
        self.downSound = pygame.mixer.Sound("sounds/menu_down.ogg")
        self.downSound.set_volume(0.3)
        self.selectSound = pygame.mixer.Sound("sounds/menu_select.ogg")
####################
#GameSurface Class
####################
#A GameSurface is a surface the actual game is rendered on
class GameSurface(object):
    """The square surface the actual game (board grid + cycles) renders on.

    The game is drawn as ASCII art: trails and cycles are font glyphs sized
    to the board cells (Python 2 code — uses xrange).
    """
    def __init__(self, window, top, left, pixelLength):
        #Marks the upper left of the game window
        self.top, self.left = top, left
        self.surface = pygame.Surface((pixelLength, pixelLength))
        self.sideLength = pixelLength
        self.margin = int(round(self.sideLength * config.OUTLINE_RATIO))
        self.window = window
        self.board = None
    def setBoard(self, board):#Sets the board to the game board
        #Because the game board is created slightly after the game surface
        self.board = board
        self.rows = board.rows
        self.cols = board.cols
        self.cellLength = self.sideLength / float(board.rows)
        #Font sizes scale with the cell size so glyphs fill a cell.
        cellFontSize =float(self.sideLength)/self.board.rows/config.CELL_SCALE
        self.cellFont = pygame.font.Font("fonts/cour.ttf", int(cellFontSize))
        cycleFontSize = cellFontSize*config.CYCLE_SCALE
        self.cycleFont = pygame.font.Font("fonts/cour.ttf",int(cycleFontSize))
    def redrawAll(self):#Redraws the board and all the cycles to the background
        self.window.background.fill(config.BACKGROUND_COLOR)
        self.drawMargin()
        for row in xrange(self.board.rows):#Draws the board
            for col in xrange(self.board.cols):
                self.drawCell(self.board.board[row][col])
        self.window.background.blit(self.surface, (self.top,self.left))
        self.window.display.blit(self.window.background, (0,0))
        pygame.display.flip()
        self.surface.fill(config.BACKGROUND_COLOR)#Fill the background color
        #last, because the cycles draw before this function and if we fill it
        #at the beginning of the function the cycles are drawn over
    def drawMargin(self):#Draw an outline for the board
        # Four rectangles: left, top, bottom, right edges of the play field.
        pygame.draw.rect(self.surface, config.MARGIN_COLOR,
                         (0, 0, self.margin, self.sideLength))
        pygame.draw.rect(self.surface, config.MARGIN_COLOR,
                         (0, 0, self.sideLength, self.margin))
        pygame.draw.rect(self.surface, config.MARGIN_COLOR,
                         (0, self.sideLength - self.margin,
                          self.sideLength, self.sideLength))
        pygame.draw.rect(self.surface, config.MARGIN_COLOR,
                         (self.sideLength - self.margin, 0,
                          self.sideLength, self.sideLength))
    def updateDisplay(self):#A wrapper for the pygame update display class
        pygame.display.update()
    def drawCell(self, cell):#Draw each cel
        # Only cells that a cycle has entered (turns > -1) leave a trail.
        if(cell.turns > -1):
            cell.incrementTurns()
            x = int(round(cell.col*self.cellLength +
                          float(self.cellLength)/config.CELL_SCALE/2))
            y = int(round(cell.row*self.cellLength +
                          float(self.cellLength)/config.CELL_SCALE/2))
            for i in xrange(2):#Stacks 2 chars on top of eachother for looks
                trail = self.cellFont.render(
                    self.randomCharacter(cell.direction), True, cell.color)
                self.surface.blit(trail, (x,y))
    #Give a random character based on what is being drawn
    def randomCharacter(self, type):
        if(type == "vertical"):
            return random.choice(config.VERTICAL_CHARACTERS)
        elif(type == "cycleUp"):
            return config.CYCLE_CHARACTERS[0]
        elif(type == "cycleDown"):
            return config.CYCLE_CHARACTERS[1]
        elif(type == "cycleLeft"):
            return config.CYCLE_CHARACTERS[2]
        elif(type == "cycleRight"):
            return config.CYCLE_CHARACTERS[3]
        elif(type == "cycleDead"):
            return config.CYCLE_CHARACTERS[4]
        else:
            return random.choice(config.HORIZONTAL_CHARACTERS)
    def drawCycle(self, cycle):#Draw the cycle
        # Center the (larger) cycle glyph within its cell.
        cellPadding = float(self.cellLength)/config.CELL_SCALE/2
        cycleOffset = cellPadding*(config.CYCLE_SCALE/2)
        x = int(round(cycle.col*self.cellLength + cellPadding - cycleOffset))
        y = int(round(cycle.row*self.cellLength + cellPadding - cycleOffset))
        y -= int(float(self.sideLength)/self.board.rows/
                 config.CELL_SCALE*config.CYCLE_SCALE/8)
        if(cycle.isLive == True):
            if(cycle.drow == -1):#The cycle is moving up
                cycleImage = self.cycleFont.render(
                    self.randomCharacter("cycleUp"), True, cycle.color)
            elif(cycle.drow == 1):#The cycle is moving down
                cycleImage = self.cycleFont.render(
                    self.randomCharacter("cycleDown"), True, cycle.color)
            elif(cycle.dcol == -1):#The cycle is moving left
                #The characters we're using for left and right are a little
                #shorter than the other characters therefore, we need to move
                #them up a little to compensate
                cycleImage = self.cycleFont.render(
                    self.randomCharacter("cycleLeft"), True, cycle.color)
            elif(cycle.dcol == 1):#The cycle is moving right
                cycleImage = self.cycleFont.render(
                    self.randomCharacter("cycleRight"), True, cycle.color)
        else:
            cycleImage = self.cycleFont.render(
                self.randomCharacter("cycleDead"), True, cycle.color)
        self.surface.blit(cycleImage, (x,y))
import tarfile
import os
# One-shot backup: gzip the SQL dump into a tar archive.
# NOTE(review): output dir is '/opt/bacnobackup' but the source dir is
# '/opt/bancodump' ('bacno' vs 'banco') — looks like a typo in one of the
# two paths; confirm against the real directory layout before changing.
with tarfile.open('/opt/bacnobackup/backup.sql.tar.gz', "w:gz") as tar:
    tar.add('/opt/bancodump/backup.sql', arcname=os.path.basename('/opt/bancodump/backup.sql'))
|
import tables
class EventIndex(tables.IsDescription):
    """PyTables row schema for an event-index table.

    NOTE(review): the original docstring said "An IOTile Stream (timeseries
    data)", which appears copy-pasted from a sibling schema; the columns
    below describe event index entries.
    """
    timestamp = tables.Int64Col()    # event time (epoch/units not shown here)
    event_id = tables.Int64Col()     # identifier of the event
    event_index = tables.Int64Col()  # position of the event — presumably within its stream; confirm
|
import zmq
import time
import socket
import struct
class TokenBucket(object):
    """Token-bucket rate limiter.

    Adapted from ActiveState recipe 511490. Tokens accumulate at
    ``fill_rate`` per second up to ``capacity``; ``consume()`` succeeds only
    when enough tokens have accumulated. Unlike the original recipe, this
    variant starts with an *empty* bucket, so early consume() calls fail
    until the bucket fills.
    """
    def __init__(self, tokens, fill_rate):
        """``tokens`` is the bucket capacity; ``fill_rate`` is the refill
        rate in tokens/second."""
        self.capacity = float(tokens)
        self._tokens = float(0)  # start empty (differs from the recipe)
        self.fill_rate = float(fill_rate)
        self.timestamp = time.time()
        self.overflowcount = 0
    def consume(self, tokens):
        """Take ``tokens`` from the bucket; return True on success, False
        when the bucket does not currently hold enough."""
        if tokens > self.tokens:  # property access refills the bucket first
            return False
        self._tokens -= tokens
        return True
    def get_tokens(self):
        """Refill according to elapsed time and return the current count."""
        now = time.time()
        if self._tokens < self.capacity:
            delta = self.fill_rate * (now - self.timestamp)
            refilled = self._tokens + delta
            if self.capacity < refilled:
                print("Bucket overflow! %d: %d" % (self.overflowcount, self.capacity - refilled))
                self.overflowcount += 1
            self._tokens = min(self.capacity, refilled)
            self.timestamp = now
        return self._tokens
    tokens = property(get_tokens)
NUM_FRAMES_PER_ZMQ_MESSAGE = 4
PACKET_SIZE = 6144 * NUM_FRAMES_PER_ZMQ_MESSAGE #Bytes
#DATA_RATE = 256000 #Byte/s
DATA_RATE = 256*1000 #Byte/s
if __name__ == "__main__":
    # Publish fixed-size dummy packets on a ZMQ PUB socket, throttled to
    # DATA_RATE bytes/second by the token bucket above.
    data = bytearray()
    for i in range(6144 * 4):
        #data.append(0b11111111)
        data.append(0b00000000)
    tb = TokenBucket(PACKET_SIZE * 4, DATA_RATE)
    # uint32_t version;uint16_t buflen[4]; uint8_t buf[NUM_FRAMES_PER_ZMQ_MESSAGE*6144];
    print("Creating context...")
    zmq_ctx = zmq.Context.instance()
    print("Creating socket")
    s = zmq_ctx.socket(zmq.PUB)
    #port = s.bind_to_random_port("tcp://*")
    s.bind("tcp://*:50473")
    print("Publishing on port %d" % 50473)
    i = 0
    try:
        while True:
            if tb.consume(PACKET_SIZE):
                # Pack header (wrapping sequence number + four frame-length
                # fields) followed by the raw payload.
                packet = struct.pack("IHHHH" + str(len(data)) + "s", i % 2048, 1, 1, 1, 1, data)
                i = (i + 1) % 2048
                s.send(packet)
            else:
                # Not enough tokens yet: back off a fraction of a packet interval.
                time.sleep(24*10**-3 / 4)
    except Exception as e:
        print(e)
    s.close()
from flask import url_for
from flask import render_template
from flask import Flask
from flask import request
from flask import redirect
from flask import session
from flask import g
import sqlite3
from flask import flash
DATABASE = "blog.db"  # SQLite database file used by connect_db()/get_db()
app = Flask(__name__)
# Session-signing key; for production this should come from config/env.
app.secret_key = b'7H{&\xa3\x92\xed\x86\xbe\xd4\x06U\xb1\x87s\xeb\x12Q\xd7\xf8Z/j\x92'
@app.route("/")
def welcome_page():
    """Render the landing page."""
    return render_template("index.html")
@app.route("/admin/")
def admin():
    """Render the admin page, bouncing unauthenticated users to login."""
    if "logged" in session:
        return render_template("admin.html")
    return redirect(url_for("login"))
@app.route("/about/")
def about():
    """Render the static about page."""
    return render_template("about.html")
@app.route("/articles/", methods=["GET"])
def articles():
    """List all articles, newest first."""
    db = get_db()
    cur = db.execute("select * from articles order by id desc")
    articles = cur.fetchall()
    return render_template("articles.html", articles=articles)
@app.route("/articles/", methods=["POST"])
def add_articles():
    """Insert a new article from the posted form and redirect to the list.

    NOTE(review): there is no session/login check here, so anyone who can
    POST can publish — confirm whether this route should require the admin
    session like /admin/ does.
    """
    db = get_db()
    db.execute("insert into articles (title, content) values (?, ?)",[request.form.get("title"), request.form.get("content")])
    db.commit()
    return redirect(url_for("articles"))
@app.route("/articles/<int:art_id>")
def article(art_id):
    """Render a single article by id, or a not-found page."""
    db = get_db()
    row = db.execute("select * from articles where id=(?)", [art_id]).fetchone()
    if not row:
        return render_template("notfound.html", id=art_id)
    return render_template("article.html", article=row)
@app.route("/login/", methods=["GET"])
def login():
    """Show the login form."""
    return render_template("login.html")
@app.route("/login/", methods=["POST"])
def login_user():
    """Check the posted credentials and start the admin session.

    SECURITY: credentials are hardcoded as admin/admin in plain text;
    replace with a hashed password loaded from configuration.
    """
    username = request.form["username"]
    password = request.form["password"]
    if username == "admin" and password == "admin":
        session["logged"]=True
        flash("Loggin Succesfull")
        return redirect(url_for("admin"))
    else:
        flash("wrong login")
        return redirect(url_for("login"))
@app.route("/logout/", methods=["POST"])
def logout():
    """End the admin session and return to the landing page.

    Bug fix: session.pop("logged") without a default raised KeyError
    (HTTP 500) when /logout/ was POSTed without an active session; the
    default makes logout idempotent.
    """
    session.pop("logged", None)
    flash("Logout Success")
    return redirect(url_for("welcome_page"))
def connect_db():
    """Open a connection to the configured SQLite database.

    Bug fix: the original called sqlite3.connect("DATABASE") — the string
    literal — creating/opening a file literally named "DATABASE" instead of
    using the DATABASE constant ("blog.db").
    """
    rv = sqlite3.connect(DATABASE)
    rv.row_factory = sqlite3.Row  # rows support name-based access
    return rv
def get_db():
    """Return the request-scoped SQLite connection, opening it on first use."""
    if not hasattr(g, "sqlite_db"):
        g.sqlite_db = connect_db()
    return g.sqlite_db
@app.teardown_appcontext
def close_db(error):
    """Close the request's database connection when the app context ends."""
    if hasattr(g, "sqlite_db"):
        g.sqlite_db.close()
def init_db(appp):
    """Create the schema by executing mdblog/schema.sql inside an app context.

    :param appp: the Flask application to initialize
    """
    with appp.app_context():
        db = get_db()
        with open("mdblog/schema.sql", "r") as fp:
            db.cursor().executescript(fp.read())
        db.commit()
|
import os
from flask import request, Blueprint, make_response
from sqlescapy import sqlescape
from bcrypt import checkpw
import jwt
from ..models import db, User
from util import have
login_routes = Blueprint('login', __name__)
@login_routes.route('/login', methods=['POST'])
def login():
    """POST /login: verify credentials and return a signed JWT.

    Expects JSON {"user": ..., "pass": ...}. Responds 400 on missing fields,
    401 on unknown user or wrong password, 500 when the signing key is unset.
    """
    data = request.json
    if not have(data, ['user', 'pass']):
        return make_response({"error":
            'User name or password wasn\'t provided'}, 400)
    # NOTE(review): sqlescape on a value passed to filter_by() is redundant
    # (the ORM parameterizes queries) and may corrupt user names containing
    # quotes — confirm before removing.
    user, password = sqlescape(data['user']), data['pass'].encode()
    user: User = User.query.filter_by(name=user).first()
    if user is None:
        return make_response({"error": "User doesn't exists"}, 401)
    if not checkpw(password, user.passwd.encode('utf-8')):
        return make_response({"error": "Password is incorrect"}, 401)
    secret = os.getenv('PRIVATE_KEY')
    if secret is None:
        print('Private key is not set!!!')
        return make_response({"error": "Private key is not set"}, 500)
    encoded_jwt = jwt.encode({'user': user.name}, secret, algorithm='HS256')
    # NOTE(review): PyJWT >= 2 returns str, so .decode() would raise
    # AttributeError — this assumes PyJWT 1.x; verify the pinned version.
    return {"token": encoded_jwt.decode()}
|
from flask import Flask, render_template, redirect, request, flash
app = Flask(__name__)
app.secret_key= 'sfljk32fn'
@app.route('/')
def index():
    """Render the form page (Python 2 Flask app)."""
    return render_template("index.html")
@app.route('/process', methods=['POST'])
def create_user():
    """Validate the submitted form and show either the info page or the
    form again with a flash message (Python 2 print syntax)."""
    print "Got User"
    name=request.form['namey']
    location=request.form['location']
    language=request.form['language']
    comments=request.form['comments']
    if len(name)>0:
        # flash("Success!")
        if len(comments)>120:
            flash("Comments cannot be longer than 120 characters")
            return render_template('index.html')
        else:
            return render_template('info.html', name=name, location=location, language=language, comments=comments)
    else:
        flash("Name cannot be empty")
        # Bare Python 2 print: emits an empty line to stdout.
        print
        return render_template('index.html')
app.run(debug=True) |
#!/usr/bin/python
# Generate color.htm: an HTML chart sampling the 24-bit web-color cube.
cols = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F']
cols1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
# Fix: use a context manager so the file is flushed and closed even on error
# (the original never called txt.close()).
with open("color.htm", "w") as txt:
    txt.write("""<html>
<head><title>Color Chart</title></head>
<body>
<center><h1>Color Chart</h1></center><br><br>
<table margin = "1" width="100%">
<thead>
<tr><th><h2>Hex</h2></th><th><h2>Color</h2></th><th><h2>Hex</h2></th><th><h2>Color</h2></th><th><h2>Hex</h2></th><th><h2>Color</h2></th><th><h2>Hex</h2></th><th><h2>Color</h2></th></tr>
</thead>
<tbody> """)
    # Sample every 2nd value for hex digits 2 and 4 and every 4th for digit 6
    # to keep the table to a manageable size.
    for c1 in cols1:
        for c2 in cols1:
            if c2 % 2 == 0:
                for c3 in cols1:
                    for c4 in cols1:
                        if c4 % 2 == 0:
                            for c5 in cols1:
                                txt.write('<tr>')
                                for c6 in cols1:
                                    if c6 % 4 == 0:
                                        txt.write('<td><center>#' + cols[c1] + cols[c2] + cols[c3] +
                                                  cols[c4] + cols[c5] + cols[c6] +
                                                  '<center></td><td style = "background-color: #' +
                                                  cols[c1] + cols[c2] + cols[c3] +
                                                  cols[c4] + cols[c5] + cols[c6] +
                                                  '"></td>')
                                txt.write('</tr>')
    txt.write("""</tbody>
</table>
</body>
</html>""")
# token_services/token_services.py
#import sys
import random
import secrets
from itsdangerous import URLSafeTimedSerializer
#from myApp import app
def generate_confirmation_token(parWhat):
    """Serialize `parWhat` into a signed, timestamped token (e.g. for e-mail
    confirmation links); decode it later with confirm_token().

    SECURITY: the secret key and salt are hardcoded below — load them from
    app config / environment (see the commented-out lookups) before deploying.
    """
    #MAIL_userName = os.environ['APP_MAIL_userName']
    #MAIL_PASSWORD = os.environ['APP_MAIL_PASSWORD']
    #secret_key=app.config['SECRET_KEY']
    #salt_password=app.config['SECURITY_PASSWORD_SALT']
    secret_key='spithas'
    salt_password='chaos490px!'
    serializer = URLSafeTimedSerializer(secret_key)
    # The second positional argument of dumps() is the salt.
    token=serializer.dumps(parWhat, salt_password)
    return token
def confirm_token(parToken, parExpiration=3600):
    """Decode a token produced by generate_confirmation_token().

    :param parToken: the signed token string
    :param parExpiration: maximum token age in seconds (default 1 hour)
    :return: the original payload, or None when the token is invalid/expired
    """
    # SECURITY: secret and salt are hardcoded; load them from configuration
    # before deploying (same issue as generate_confirmation_token).
    secret_key='spithas'
    salt_password='chaos490px!'
    serializer = URLSafeTimedSerializer(secret_key)
    try:
        what = serializer.loads(
            parToken,
            salt=salt_password,
            max_age=parExpiration
        )
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt are
        # no longer swallowed; any signature/expiry failure still yields None.
        return None
    return what
def generate_mobileconfirmation_code(parWhat):
    """Return a random 6-digit confirmation code as a string.

    :param parWhat: unused; kept for interface compatibility with callers.

    Fix: uses the `secrets` module instead of `random` because this code is
    security-sensitive (`random` is predictable).
    """
    return str(secrets.randbelow(900000) + 100000)
def generate_unique_sessionID():
    """Return a signed session identifier.

    NOTE(review): this serializes the constant 'satora', so "uniqueness"
    comes only from the serializer's embedded timestamp (1-second
    resolution) — two calls within the same second produce identical IDs;
    confirm whether that is acceptable.
    SECURITY: hardcoded secret/salt, as elsewhere in this module.
    """
    #MAIL_userName = os.environ['APP_MAIL_userName']
    #MAIL_PASSWORD = os.environ['APP_MAIL_PASSWORD']
    #secret_key=app.config['SECRET_KEY']
    #salt_password=app.config['SECURITY_PASSWORD_SALT']
    secret_key='spithas'
    salt_password='chaos490px!'
    serializer = URLSafeTimedSerializer(secret_key)
    token=serializer.dumps('satora', salt_password)
    return token
|
from zipfile import ZipFile
import shutil, json, os
import tempfile
import lib.filewalker
import lib.interpreter
import subprocess
class KuanzaProto:
    """A project-prototype template stored as a .zip archive.

    The archive contains a JSON 'prototype.info' descriptor plus a
    'prototype/' tree (the files to instantiate) and optionally an 'init/'
    tree whose declared 'init' command runs after variable interpolation.
    """
    def __init__(self, zipfile):
        self.zipfile = zipfile
        zip = ZipFile(zipfile)  # NOTE: shadows the builtin `zip`
        self.zip = zip
        self.info = json.loads( zip.read( 'prototype.info' ).decode('utf-8') )
    def extract(self, projectname, projectVariables, inline=False, doInit=True):
        """Unpack the prototype into a temp dir, interpolate variables into
        every file, optionally run the init command, then copy the result
        inline into the cwd or into a new `projectname` directory."""
        print('extracting files')
        tempdir = tempfile.mkdtemp()
        temppath = os.path.join(tempdir, 'prototype');
        initpath = os.path.join(tempdir, 'init')
        # Expose the temp locations to the template interpreter.
        projectVariables.append({'TEMP_PATH': temppath})
        projectVariables.append({'INIT_PATH': initpath})
        for file in self.zip.namelist():
            if( file.startswith('prototype/') or file.startswith('init/') ):
                if( file.endswith('/') ):
                    newdir = os.path.join(tempdir, file )
                    os.makedirs( newdir )
                else:
                    self.zip.extract(file, tempdir)
        files = lib.filewalker.FileWalker(tempdir)
        files.each( lambda filename: lib.interpreter.Interpreter(filename).interpret(projectVariables) )
        if doInit:
            # NOTE(review): _hasinit() exists for this check but is unused;
            # this only tests whether the init directory was extracted.
            if os.path.exists( initpath ):
                self._init(temppath, initpath)
        if inline:
            self._copyinline( temppath )
        else:
            self._copy( temppath, projectname)
        shutil.rmtree( tempdir )
    def _hasinit(self, initpath):
        # True when the descriptor declares an init command AND the init
        # directory exists on disk.
        if not self.getInit():
            return False
        if not os.path.exists( initpath ):
            return False
        return True
    def _init(self, temppath, initpath):
        # Run the declared init command from inside the init directory,
        # restoring the previous working directory afterwards.
        workingdir = os.getcwd()
        try:
            os.chdir( initpath )
            subprocess.call( self.info['init'] )
        finally:
            os.chdir(workingdir)
    def _copyinline(self, temppath):
        # Copy every generated file straight into the current directory.
        files = lib.filewalker.FileWalker( temppath )
        files.each( lambda filename: shutil.copy(filename, os.getcwd() ) )
    def _copy( self, temppath, projectname ):
        # Move the generated tree into a new project directory.
        shutil.move( temppath, projectname )
    def close(self):
        self.zip.close()
    def getName(self):
        return self.info['name']
    def getInit(self):
        # Optional command to run after extraction, or None.
        if 'init' in self.info.keys():
            return self.info['init']
        return None
    def getVariables(self):
        # Template variables declared by the prototype (may be empty).
        if 'variables' not in self.info.keys():
            return []
        return self.info['variables']
    def getPath(self):
        return self.zipfile
    @staticmethod
    def checkIntegrity( filepath ):
        """Return True when `filepath` is a readable prototype zip with a name."""
        result = False
        proto = None
        try:
            proto = KuanzaProto( filepath )
            if proto.getName() != None:
                result = True
        except:
            result = False
        finally:
            if proto != None:
                proto.close()
        return result
    @staticmethod
    def findByPackage(package):
        """Yield a KuanzaProto for every .zip in the package directory."""
        for zipfile in os.listdir( package.getPath() ):
            if zipfile.endswith('.zip'):
                yield KuanzaProto( os.path.join( package.getPath(), zipfile ) )
    @staticmethod
    def exists( package, name ):
        """Return True when the package contains a prototype called `name`."""
        return KuanzaProto.findByPackageAndName(package, name) != None
    @staticmethod
    def findByPackageAndName(package, name):
        """Return the package's prototype called `name`, or None."""
        for proto in KuanzaProto.findByPackage(package):
            if proto.getName() == name:
                return proto
        return None
#!/usr/bin/python
import time
import csv
import bluetooth
import os, sys
import datetime
import smtplib
import string
import select
from pprint import pprint
class MyDiscoverer (bluetooth.DeviceDiscoverer) :
    """Asynchronous Bluetooth inquiry that collects unique device addresses
    (Python 2 code)."""
    def pre_inquiry (self):
        self.done = False
    def device_discovered(self, address, device_class, name):
        # Called once per response; record each address only once.
        print "%s - %s" % (address , name)
        if address not in self.discovered_list:
            self.discovered_list.append(address)
    def inquiry_complete(self):
        self.done = True
    def discover_my_devices(self):
        """Run a 15-second inquiry and return the list of addresses seen."""
        self.discovered_list = []
        self.find_devices(lookup_names = False, duration=15)
        while True:
            # Pump the discoverer's socket until the inquiry finishes.
            can_read, can_write, has_exc = select.select ([self ], [ ], [ ])
            if self in can_read:
                self.process_event()
            if self.done:
                break
        return self.discovered_list
peopledata={} # static list of known people
ownerdata={} # static list of house owners
ghostlist={} # dictionary of people who have recently left and how long they've been gone
presence={} # dictionary of people who have already been announced and when they were last seen
peoplepresent=[] # list of people found
timestamp=[] # list timestamps that lists when people are found or when they leave
# NOTE(review): timelimit is a *string* compared against str(timedelta)
# below; lexicographic comparison of these strings is fragile — a real
# datetime.timedelta comparison would be safer.
timelimit="0:01:00:000000" # time limit of when to remove someone from presence (the length of time someone can be gone before they can be reannounced)
FROM = "#"
smtptolist = ['#','#']
TO = ','.join(smtptolist)
for row in csv.reader(open('NameList')): #import static list of known people
    peopledata[row[0]] = row[1:]
for row in csv.reader(open('OwnerList')): #import static list of owners
    ownerdata[row[0]] = row[1:]
log = open("log.txt", "a")
print "Device List Loaded."
print
# Main polling loop (Python 2): scan for nearby Bluetooth devices, announce
# newcomers by e-mail and text-to-speech, and age out people who have left.
while(True):
    snafu=0 #reset timestamp on every loop
    owner=[] #reset owner presence on every loop
    announce="" #reset announce string on every loop
    ownerannounce = "" #reset ownerannouce string on every loop
    devicesfound = MyDiscoverer().discover_my_devices() # search for nearby BT MAC Addresses at 5 second interval
    snafu = datetime.datetime.now() # get current date/time
    print "-------------"
    print "DevicesFound: %s" %(str(devicesfound), )
    #print "Presence Dictionary: %s" %(str(presence), )
    #print "GhostList: %s" % (str(ghostlist), )
    pprint('Presence Dictionary: %s' %(presence))
    pprint('Ghostlist: %s' %(ghostlist))
    print "-------------"
    log.write(str(snafu))
    log.write('\n-------------\n')
    log.write('DevicesFound: %s\n' %(str(devicesfound), ))
    log.write('Presence Dictionary: %s\n' %(str(presence), ))
    log.write('GhostList: %s\n' % (str(ghostlist), ))
    log.write('-------------\n')
    for personpresent in presence: # for every person that has recently been present
        if personpresent in devicesfound: # if they are currently in the house
            if personpresent in ghostlist: # and if they are in the ghostlist
                print "-------------"
                print "%s has returned" %(personpresent, )
                print "-------------"
                del ghostlist[personpresent] # remove them from the ghostlist
            else:
                pass
        else:
            ghostlist[personpresent] = snafu # otherwise, update the ghostlist timestamp
    for devicefound in devicesfound: # for every bluetooth device found
        if devicefound in ownerdata: # if they are the owner
            owner.append(devicefound) # add the device to the owner list
            ownerannounce += ownerdata[devicefound][0] + " and " # create a text string listing the owners present
        if devicefound in peopledata: #if they are in your known people list
            # NOTE(review): peoplepresent/timestamp are never cleared, so
            # they grow across loop iterations and presence is rebuilt from
            # the full history below — confirm this is intended.
            timestamp.append(snafu) #add/update timestamp to snafu list
            peoplepresent.append(devicefound) #add device to present list
            if devicefound in presence: # if the device is already present
                pass #do nothing
            else: #otherwise...welcome them and notify the owners!
                announce += peopledata[devicefound][0] + " and "
                #os.system('mplayer ~/Dropbox/apps/bluebell/dev/music/%s -endpos 15' % (peopledata[devicefound][1], ))
                print "-------------"
                print "%s added to presence" %(devicefound, )
                print "-------------"
        else:
            pass
    presence = dict((zip(peoplepresent, timestamp))) # create the presence dictionary based on the people found and a timestamp
    for persongone in ghostlist: #for every person not currently present
        # NOTE(review): presence[persongone] raises KeyError once the person
        # has been removed from presence while still in ghostlist — verify.
        delta = str(ghostlist[persongone] - presence[persongone]) #determine how long they've been gone
        #print delta
        if delta > timelimit: #if they've been gone > x
            if persongone in presence:
                #print "-------------"
                #print "%s removed from presence" %(personpresent, )
                #print "-------------"
                del presence[persongone] #remove them from presence so that they can be reannounced when they return
            else:
                pass
        else:
            pass
    if (len(announce) > 0): # if people present have not yet been announced
        if(len(owner) == 1): # check if there is only one owner home
            ownerannounce = ownerannounce[:-4]
            ownerannounce = ownerannounce + " is home and will be with you shortly "
        if(len(owner) == 0): # check if there is only one owner home
            ownerannounce = " Noone is currently home. But they have been notified of your presence "
        if (len(owner) > 1): #check if there is more than one owner home
            ownerannounce = ownerannounce[:-4]
            ownerannounce = ownerannounce + " are home and will be with you shortly "
        announce = announce[:-4]
        # Notify the owners by e-mail via Gmail SMTP (credentials redacted).
        server = smtplib.SMTP('smtp.gmail.com',587)
        server.ehlo()
        server.starttls()
        server.ehlo
        server.login('#', '#')
        server.set_debuglevel(1)
        BODY = string.join((
            "From: %s" % FROM,
            "To: %s" % TO,
            "Subject: At your door: %s " % (announce, ),
            "",
            ), "\r\n")
        server.sendmail(FROM, smtptolist, BODY)
        server.close()
        announce = "Welcome " + announce + ". "
        completeannounce = announce + ownerannounce
        #completeannounce = completeannounce.replace(' ','%20')
        # Speak the greeting via the (legacy) Google Translate TTS endpoint.
        os.system('mplayer "http://translate.google.com/translate_tts?tl=en&q=%s"' % completeannounce, )
        #os.system('btplay "http://translate.google.com/translate_tts?tl=en&q=%s"' % completeannounce, )
        #time.sleep(5)
        #os.system('btplay "http://translate.google.com/translate_tts?tl=en&q=%s"' % ownerannounce, )
        #print announce
        #print ownerannounce
|
import os
import re
import pydantic
from ansible_collections.nhsd.apigee.plugins.module_utils.models.manifest.manifest import (
Manifest,
)
def correct_namespace(name, api_name, env_name) -> bool:
    """
    Check that an Apigee resource name follows our namespacing convention:
    it must start with `api_name`, contain `env_name`, and any extra parts
    must be lowercase words joined by hyphens.

    e.g. for api_name="canary-api" and env_name="internal-dev"

    |--------------------------------------------------------------+--------|
    | name                                                         | result |
    |--------------------------------------------------------------+--------|
    | "canary-api-internal-dev"                                    | True   |
    | "canary-api-extra-thing-internal-dev"                        | True   |
    | "canary-apiinternal-dev"                                     | False  |
    | "canary-api-internal-dev-application-restricted"             | True   |
    | "canary-api-extra-thing-internal-dev-application-restricted" | True   |
    |--------------------------------------------------------------+--------|

    :param name: Name of thing in Apigee.
    :param api_name: The meta.api.name item from your manifest
    :param env_name: The environment name (e.g. 'internal-dev', 'int', or 'prod')
    """
    allowed = f"^{api_name}(-[a-z]+)*-{env_name}(-[a-z]+)*$"
    return re.match(allowed, name) is not None
class ValidateManifest(pydantic.BaseModel):
    """Validates a manifest against pipeline metadata and namespacing rules.

    Fields:
      dist_dir: optional directory prepended to every spec path.
      manifest: the parsed manifest.
      service_name: optional pipeline SERVICE_NAME cross-checked against
                    the manifest's meta.api.name.
    """
    dist_dir: pydantic.DirectoryPath = ""
    manifest: Manifest
    service_name: str = ""
    @pydantic.validator("service_name")
    def check_service_name(cls, service_name, values):
        """Ensure SERVICE_NAME begins with the manifest's meta.api.name.

        Bug fix: every exit now returns `service_name`. The original
        returned None on all paths, which made pydantic silently replace
        the validated field value with None.
        """
        if service_name:
            manifest = values.get("manifest")
            if not manifest:
                return service_name
            meta = manifest.meta
            if not meta:
                return service_name
            api_name = meta.api.name
            if not re.match(f"{api_name}(-[a-z]+)*", service_name):
                raise ValueError(
                    f"pipeline defined SERVICE_NAME ('{service_name}') does not begin with manifest defined meta.api.name ('{api_name}')"
                )
        return service_name
    @pydantic.validator("manifest", pre=True)
    def prepend_dist_dir_to_spec_paths(cls, manifest, values):
        """Rewrite each spec's path to live under dist_dir (pre-parse).

        Also removes the leftover debug print of dist_dir.
        """
        dist_dir = values.get("dist_dir")
        if not dist_dir:
            return manifest
        apigee = manifest["apigee"]
        for env_dict in apigee["environments"]:
            for spec_dict in env_dict["specs"]:
                path = spec_dict.get("path")
                if path is not None:
                    spec_dict["path"] = os.path.join(dist_dir, path)
        return manifest
    @pydantic.validator("manifest")
    def check_namespacing(cls, manifest, values):
        """Check every product and spec name with correct_namespace()."""
        if not manifest.meta:
            # Bug fix: was a bare `return` (None), which nulled the field.
            return manifest
        api_name = manifest.meta.api.name
        for env in manifest.apigee.environments:
            if env is None:
                continue
            for product in env.products:
                if not correct_namespace(product.name, api_name, env.name):
                    raise ValueError(
                        f"{product.name} does not conform to namespace {api_name}-*{env.name}"
                    )
            for spec in env.specs:
                if not correct_namespace(spec.name, api_name, env.name):
                    raise ValueError(
                        f"{spec.name} does not conform to namespace for {api_name}-*{env.name}"
                    )
        return manifest
|
# File requires working python.pcl
import os
import sys
import glob
from math import isclose
import numpy as np
import skimage.io as io
from skimage.viewer import ImageViewer
from skimage.viewer.canvastools import RectangleTool
import matplotlib.pyplot as plt
relative_utils_path = '../../../utils'
utils_path = os.path.join(os.path.dirname(__file__), relative_utils_path)
sys.path.append(utils_path)
from pcl_helper import float_to_rgb
import pcl
from svm_predictor import SVMPredictor
relative_dataset_path = '../../../../data/pointclouds7'
dataset_path = os.path.join(os.path.dirname(__file__), relative_dataset_path)
relative_output_dir = '../../../../data/pointclouds7/patches'
output_dir = os.path.join(os.path.dirname(__file__), relative_output_dir)
def get_rgb_data_from_pointcloud():
    # TODO: unimplemented stub; no callers in this file use it yet.
    pass
def get_rgb_image_from_pcd(path):
    """Load an XYZRGB point cloud from a .pcd file and return its RGB image.

    Assumes the cloud is an organized 480x640 grid in row-major order —
    TODO confirm against the capture pipeline.

    :param path: path to the .pcd file
    :return: uint8 RGB image of shape (480, 640, 3)
    """
    pointcloud_np = pcl.load_XYZRGB(path).to_array()
    # Column 3 holds the packed-RGB float; decode each point's color.
    rgb_image = [float_to_rgb(packed) for packed in pointcloud_np[:, 3]]
    rgb_image_np = np.asarray(rgb_image, dtype=np.uint8)
    return np.reshape(rgb_image_np, (480, 640, 3))
def get_rgb_image_from_npy(path):
    """Load a saved point-cloud array (.npy) and return its RGB image.

    Same decoding as get_rgb_image_from_pcd, but reading the numpy dump
    directly; assumes a 480x640 organized cloud — TODO confirm.

    :param path: path to the .npy file
    :return: uint8 RGB image of shape (480, 640, 3)
    """
    pointcloud_np = np.load(path)
    # Column 3 holds the packed-RGB float; decode each point's color.
    rgb_image = [float_to_rgb(packed) for packed in pointcloud_np[:, 3]]
    rgb_image_np = np.asarray(rgb_image, dtype=np.uint8)
    return np.reshape(rgb_image_np, (480, 640, 3))
def annotate_data(annotation_file_path):
    """Interactively draw one bounding box per point-cloud file and append
    the annotations to `annotation_file_path`.

    Each output line has the form 'file;row_min;col_min;rows;cols'.
    """
    print('No annotation file found. Annotation required.')
    files = glob.glob(os.path.join(dataset_path, '*.*'))
    annotations = []
    for file in files:
        print(file)
        file_extension = os.path.splitext(file)[1]
        if file_extension == '.pcd':
            image = get_rgb_image_from_pcd(file)
        elif file_extension == '.npy':
            image = get_rgb_image_from_npy(file)
        else:
            print('Unsupported file type.')
            continue
        viewer = ImageViewer(image)
        def enter_callback(extents):
            # extents are (x_min, x_max, y_min, y_max); convert to
            # (row_min, col_min, rows, cols).
            bbox = (int(extents[2]), int(extents[0]), int(extents[3] - extents[2]), int(extents[1] - extents[0]))
            # (0,0,1,0) is the RectangleTool's untouched default selection.
            if bbox != (0,0,1,0):
                annotations.append((file, bbox))
                print(bbox)
            else:
                print('No annotations were made.')
            viewer.close()
        rect_tool = RectangleTool(viewer, on_enter=enter_callback)
        viewer.show()
    annotation_str = ''  # NOTE(review): unused leftover
    for annotation in annotations:
        file_name = os.path.basename(annotation[0])
        bbox_row_min = annotation[1][0]
        bbox_col_min = annotation[1][1]
        bbox_rows = annotation[1][2]
        bbox_cols = annotation[1][3]
        with open(annotation_file_path, 'a') as text_file:
            print('{};{};{};{};{}'.format(file_name, bbox_row_min, bbox_col_min, bbox_rows, bbox_cols), file=text_file)
# Adapted from https://stackoverflow.com/questions/25349178/calculating-percentage-of-bounding-box-overlap-for-image-detector-evaluation
def get_iou(bb1, bb2):
    """
    Calculate the Intersection over Union (IoU) of two bounding boxes.

    Both boxes use this module's (row_min, col_min, height, width)
    convention.  Returns a float in [0, 1]; 0.0 for disjoint boxes.
    """
    # Unpack and convert to corner coordinates.
    top1, left1, h1, w1 = bb1
    top2, left2, h2, w2 = bb2
    right1, bottom1 = left1 + w1, top1 + h1
    right2, bottom2 = left2 + w2, top2 + h2
    # Width/height of the intersection rectangle (negative => disjoint).
    inter_w = min(right1, right2) - max(left1, left2)
    inter_h = min(bottom1, bottom2) - max(top1, top2)
    if inter_w < 0 or inter_h < 0:
        return 0.0
    # The intersection of two axis-aligned boxes is axis-aligned.
    intersection_area = inter_w * inter_h
    union_area = h1 * w1 + h2 * w2 - intersection_area
    iou = intersection_area / float(union_area)
    assert 0.0 <= iou <= 1.0
    return iou
def centroid_lies_in_bbox(centroid, bbox):
    """True when (row, col) centroid lies inside bbox =
    (row_min, col_min, rows, cols); borders are inclusive."""
    row, col = centroid
    row_min, col_min, rows, cols = bbox
    return (row_min <= row <= row_min + rows) and (col_min <= col <= col_min + cols)
def extract_patches_from_image(image, height, width, stride, bbox):
    """
    Slide a height x width window over `image` with the given stride and
    label every patch: 1 when it overlaps the ground-truth `bbox` by
    more than 40% IoU AND its centre lies inside the bbox, else 0.

    Returns a list of (patch, label) tuples.  Patch boxes use the
    (row, col, height, width) convention shared across this module.
    """
    min_required_overlap = 0.4
    print('Processing image...')
    total_rows = image.shape[0]
    total_cols = image.shape[1]
    labelled = []  # entries are (patch, label)
    # Column-major sweep, matching the original extraction order.
    for col in range(0, (total_cols - width) + 1, stride):
        for row in range(0, (total_rows - height) + 1, stride):
            window = image[row:row + height, col:col + width]
            window_bbox = (row, col, height, width)
            centre = (int(row + 0.5 * height), int(col + 0.5 * width))
            positive = (get_iou(bbox, window_bbox) > min_required_overlap
                        and centroid_lies_in_bbox(centre, bbox))
            labelled.append((window, 1 if positive else 0))
    return labelled
def extract_patches(annotation_file_path, percentage_positive):
    """Cut labelled 70x70 patches (stride 4) from every annotated cloud.

    Reads `file;row;col;rows;cols` lines, extracts patches per file,
    shuffles, then subsamples negatives so roughly percentage_positive
    of the result is positive.  percentage_positive == 0.0 keeps every
    negative (used for hard-negative mining).  Returns a numpy object
    array of (patch, label) rows.
    """
    print('Extracting patches according to annotation file {}.'.format(annotation_file_path))
    with open(annotation_file_path, 'r') as f:
        data= f.read()
    annotation_strs = data.split('\n')
    overall_patches = []
    for annotation_str in annotation_strs:
        if not annotation_str:
            continue
        # Annotation format: file_name;row_min;col_min;rows;cols
        annotation_split = annotation_str.split(';')
        file_name = annotation_split[0]
        bbox_row_min = int(annotation_split[1])
        bbox_col_min = int(annotation_split[2])
        bbox_rows = int(annotation_split[3])
        bbox_cols = int(annotation_split[4])
        bbox = (bbox_row_min, bbox_col_min, bbox_rows, bbox_cols)
        file_extension = os.path.splitext(file_name)[1]
        if file_extension == '.pcd':
            image = get_rgb_image_from_pcd(os.path.join(dataset_path, file_name))
            patches = extract_patches_from_image(image, 70, 70, 4, bbox)
            overall_patches.extend(patches)
        elif file_extension == '.npy':
            image = get_rgb_image_from_npy(os.path.join(dataset_path, file_name))
            patches = extract_patches_from_image(image, 70, 70, 4, bbox)
            overall_patches.extend(patches)
        else:
            print('Unsupported file type.')
            continue
    # Select patches according to split ratio
    patches_np = np.array(overall_patches)  # object array of (patch, label) rows
    np.random.shuffle(patches_np)
    labels_np = patches_np[:,1]
    positive_samples_mask = labels_np == 1
    negative_samples_mask = labels_np == 0
    positive_count = positive_samples_mask.sum()
    if isclose(percentage_positive, 0.0):
        # 0.0 means "no rebalancing": keep every negative.
        negative_count = negative_samples_mask.sum()
    else:
        percentage_negative = 1.0 - percentage_positive
        # NOTE(review): float division; negative_count is truncated below.
        one_percent = positive_count / int(percentage_positive * 100)
        negative_count = int(percentage_negative * 100 *one_percent)
    positive_samples = patches_np[positive_samples_mask]
    negative_samples = patches_np[negative_samples_mask]
    selected_patches = []
    for i in range(0, positive_count):
        selected_patches.append(positive_samples[i])
    for i in range(0, negative_count):
        selected_patches.append(negative_samples[i])
    return np.array(selected_patches)
def write_patches_to_dir(patches, output_dir):
    """Save every (image, label) patch as a PNG under output_dir/pos or
    output_dir/neg according to its label; progress every 100 files."""
    print('Writing patches to filesystem...')
    total = len(patches)
    for index, patch in enumerate(patches):
        if index % 100 == 0:
            print('File {}/{}'.format(index, total))
        subdir = 'pos' if patch[1] == 1 else 'neg'
        io.imsave(os.path.join(output_dir, subdir, str(index) + '.png'), patch[0])
def create_dataset():
    """Annotate the raw clouds when no annotation file exists yet, then
    build and persist the labelled patch set (20% positives)."""
    annotation_file = os.path.join(dataset_path, 'cup_annotations.txt')
    if not os.path.isfile(annotation_file):
        annotate_data(annotation_file)
    write_patches_to_dir(extract_patches(annotation_file, 0.2), output_dir)
def perform_hn_mining():
    """Hard-negative mining: run the trained SVM over all patches
    (no rebalancing) and write its false positives to the patch
    directory for retraining."""
    annotation_file_path = os.path.join(dataset_path, 'cup_annotations.txt')
    if not os.path.isfile(annotation_file_path):
        annotate_data(annotation_file_path)
    patches = extract_patches(annotation_file_path, 0.0)
    svm_predictor = SVMPredictor()
    svm_predictor.initialize_predictor()
    sample_count = min(100000, patches.shape[0])
    # NOTE(review): the slice stops at sample_count-1 and so drops the
    # last sample — off-by-one? confirm intent.
    samples = patches[0:sample_count-1, 0]
    print('Predicting samples. This can take a while.')
    results = svm_predictor.predict(samples)
    false_positives = []
    for result, patch in zip(results, patches):
        # result[1] is presumably the positive-class score — confirm
        # against SVMPredictor.predict's return format.
        if result[1] > 0.5 and patch[1] == 0:
            false_positives.append(patch)
    write_patches_to_dir(false_positives, output_dir)
if __name__=='__main__':
    # Mutually exclusive entry points: initial dataset creation vs. mining.
    #create_dataset()
    perform_hn_mining()
|
import subprocess, shutil, io, os
import pandas
def run_one(*, Tstar, segment_density, chain_length):
    """Build, relax and equilibrate one FENE chain system in LAMMPS and
    return ensemble averages from its thermo dump.

    Keyword-only args: Tstar (reduced temperature), segment_density,
    chain_length.  Requires mpirun + lammps on PATH; copies out.dump to
    /out when that mount exists.
    """
    if not isinstance(Tstar, float):
        Tstar = float(Tstar)
    # Build the chain
    import chain_builder as cb; cb.build_chain(segment_density=segment_density, Nchains=320, chain_length=chain_length, ofname='data.fene', verbose=True)
    # print(open('data.fene').read())
    Nproc = 12
    # Relax the chain to remove overlapping segments
    subprocess.check_call(f'mpirun -np {Nproc} --allow-run-as-root lammps -sf opt -in do_chain.lammps -var Tstar {Tstar}',shell=True)
    # print(open('chain_equil.data').read())
    # Do an equilibrium run from the de-overlapped configuration
    subprocess.check_call(f'mpirun -np {Nproc} --allow-run-as-root lammps -sf opt -in do_run.lammps -var segment_density {segment_density} -var Tstar {Tstar}',shell=True)
    # print(open('out.dump').read())
    # Copy it into the output folder if that folder exists
    if os.path.exists('/out'):
        shutil.copy2('out.dump','/out')
    # Strip the leading '# ' so pandas sees a plain whitespace-separated header.
    contents = open('out.dump').read().replace('# ','')
    df = pandas.read_csv(io.StringIO(contents), sep=r'\s+',skiprows=1)
    # print('means:::::::::::::::::::::::::::')
    # for col in df.keys():
    #     print(col, df[col].mean())
    return {
        'chain_length': chain_length,
        'T': df['v_Temp'].mean(),
        'p': df['v_Press'].mean(),
        'rho_seg': df['v_Natoms'].mean()/df['v_Volume'].mean(),
        'Unonbonded/chain': df['v_vdWEnergy'].mean()*chain_length, # vdWEnergy is per atom
        'Uintra/chain': df['v_vdWIntra'].mean()/df['v_Natoms'].mean()*chain_length, # outputs of the compute pe/mol/all are extensive
        'Uinter/chain': df['v_vdWInter'].mean()/df['v_Natoms'].mean()*chain_length, # outputs of the compute pe/mol/all are extensive
    }
if __name__ == '__main__':
    # Results must land outside the container; fail fast if the mount is missing.
    assert(os.path.exists('/out'))
    # with open('/out/something','w') as fp:
    #     fp.write('hihi')
    outputs = []
    for segment_density in [0.001]:
        for Tstar in [2.05,3.00,4.00,6.00,8.00,10.00,50.00,100.00]:
            try:
                outputs.append(run_one(Tstar=Tstar, chain_length=3, segment_density=segment_density))
            except Exception as err:
                # Best-effort sweep: skip the failed state point but say so.
                # The previous bare `except:` also swallowed
                # KeyboardInterrupt/SystemExit and hid every failure.
                print(f'run failed (Tstar={Tstar}, segment_density={segment_density}): {err!r}')
    pandas.DataFrame(outputs).to_csv('/out/chains3_isolated.csv', index=False)
    # outputs = []
    # for segment_density in [0.9, 0.8, 0.7, 0.6, 0.5, 0.4, 0.3, 0.2, 0.1]:
    #     try:
    #         outputs.append(run_one(Tstar=10.0, chain_length=3, segment_density=segment_density))
    #     except:
    #         pass
    # pandas.DataFrame(outputs).to_csv('/out/chains3_T4.csv', index=False)
|
import requests
import os
import urllib
import urllib2
from sendmail import send_mail_function
from flask import Flask, render_template, request
app = Flask(__name__)
@app.route('/')
def index():
    """Health check: confirms the mail service is up."""
    return "Email Service Active"
@app.route('/test')
def test():
    """Send a fixed test email to the service inbox."""
    send_mail_function("jplservice00@gmail.com", "Test email", "Someone hit the test URL")
    return "Test Email Sent"
@app.route('/sendmail', methods=['POST'])
def send_mail():
    """Relay a POSTed form (email, subject, body; `name` unused) to
    send_mail_function.  Python 2 code (print statement, urllib2)."""
    print "sendmail service"
    # Get email parameters
    name=request.form['name'] # This is currently not used
    email=request.form['email']
    subject=request.form['subject']
    body=request.form['body']
    # Send mail
    send_mail_function(email, subject, body)
    return "OK"
if __name__ == '__main__':
    # Host/port come from the environment (cloud-IDE style); default 0.0.0.0:5011.
    app.run(debug=True,host=os.getenv('IP', '0.0.0.0'),port=int(os.getenv('PORT', 5011)))
|
# Print the running concatenation of each vowel with every vowel
# ("aa", "aae", ..., "ee", "eea", ...).
vowel = ["a", "e", "i", "o", "u"]
for first in vowel:
    combo = first
    for second in vowel:
        combo += second
        print(combo)
|
from PyQt4 import QtGui, QtCore
class AddStationDialog(QtGui.QDialog):
    """Modal dialog asking for a radio station's name and address.

    On OK with both fields filled it emits the old-style "addstation"
    signal; the parent then reads radio_name / radio_adress.
    """

    def __init__(self, parent):
        QtGui.QDialog.__init__(self)
        # Name of the custom signal emitted on successful accept.
        self.signal = "addstation"
        # parent.setEnabled(False)
        self.setParent(parent)
        self.radio_name = QtGui.QLineEdit()
        self.radio_adress = QtGui.QLineEdit()
        self.radio_name.setPlaceholderText("Give the name of the radio station")
        self.radio_adress.setPlaceholderText("Give the address of the radio station")
        # radio_label doubles as the inline validation-error display.
        self.radio_label = QtGui.QLabel()
        self.radio_name_label = QtGui.QLabel()
        self.radio_name_label.setText("Give the name of the radio: ")
        self.radio_adress_label = QtGui.QLabel("Give the address of the radio: ")
        buttonbox = QtGui.QDialogButtonBox(QtGui.QDialogButtonBox.Ok | QtGui.QDialogButtonBox.Cancel)
        buttonbox.accepted.connect(self.accept)
        buttonbox.rejected.connect(self.reject)
        # Two labelled rows on top, error label + buttons at the bottom.
        vbox = QtGui.QVBoxLayout()
        hbox = QtGui.QHBoxLayout()
        hbox1 = QtGui.QHBoxLayout()
        hbox2 = QtGui.QHBoxLayout()
        hbox1.addWidget(self.radio_name_label)
        hbox1.addWidget(self.radio_name)
        hbox2.addWidget(self.radio_adress_label)
        hbox2.addWidget(self.radio_adress)
        vbox.addLayout(hbox1)
        vbox.addLayout(hbox2)
        hbox.addWidget(self.radio_label)
        hbox.addWidget(buttonbox)
        vbox.addLayout(hbox)
        self.setLayout(vbox)
        self.setFixedSize(450, 100)
        self.setWindowFlags(QtCore.Qt.Dialog)

    def accept(self):
        """Validate both fields; emit the custom signal when valid.

        NOTE(review): QDialog.accept() is never called, so the dialog
        stays open after emitting — confirm this is intentional.
        """
        if self.radio_name.text() == "" or self.radio_adress.text() == "":
            self.radio_label.setText("You must fill both fields")
        if self.radio_name.text() != "" and self.radio_adress.text() != "":
            self.emit(QtCore.SIGNAL(self.signal))

    def reject(self):
        # Hide instead of closing — presumably so entered text survives
        # reopening; confirm against the parent's usage.
        self.hide()
'''
Created on Sep 29, 2014
@author: mendt
'''
import unittest, logging, time
from georeference.settings import DBCONFIG_PARAMS
from georeference.utils.tools import loadDbSession
from georeference.georeferenceupdate import runningResetJobs, runningNewJobs, runningUpdateJobs, lookForUpdateProcess
from vkviewer.python.utils.logger import createLogger
from vkviewer.python.models.messtischblatt.Georeferenzierungsprozess import Georeferenzierungsprozess
class GeoreferenceUpdateTest(unittest.TestCase):
    """Manual integration tests for the georeference update jobs
    (Python 2 code — print statements).  All but the last case are
    skipped; the active testLookForUpdateProcess_Infinity loops forever
    (daemon-style smoke run) and is NOT suitable for automated CI.
    """

    @classmethod
    def setUpClass(cls):
        print '=============='
        print 'Start georeferenceupdate tests ...'
        print '=============='
        cls.logger = createLogger(name = 'GeoreferenceUpdateTest', level = logging.DEBUG)

    @unittest.skip('Skip testRunningResetJobs')
    def testRunningResetJobs(self):
        print "--------------------------------------------------------------------------------------------"
        print "\n"
        print "Test if runningResetJobs runs correctly ..."
        dbsession = loadDbSession(DBCONFIG_PARAMS, self.logger)
        # Third arg True presumably means test mode — confirm signature.
        response = runningResetJobs(dbsession, self.logger, True)
        # check if the reset functions
        resetJobs = Georeferenzierungsprozess.getResetJobs(dbsession)
        counter = 0
        for job in resetJobs:
            counter += 1
        self.assertTrue(counter == 0, 'There are unprocessed reset jobs ...')
        dbsession.rollback()

    @unittest.skip('Skip testRunningNewJobs')
    def testRunningNewJobs(self):
        print "--------------------------------------------------------------------------------------------"
        print "\n"
        print "Test if testRunningNewJobs runs correctly ..."
        dbsession = loadDbSession(DBCONFIG_PARAMS, self.logger)
        response = runningNewJobs(dbsession, self.logger, True)
        # add tests
        # @TODO
        dbsession.rollback()

    @unittest.skip('Skip testRunningNewJobs')
    def testRunningUpdateJobs(self):
        print "--------------------------------------------------------------------------------------------"
        print "\n"
        print "Test if testRunningUpdateJobs runs correctly ..."
        dbsession = loadDbSession(DBCONFIG_PARAMS, self.logger)
        response = runningUpdateJobs(dbsession, self.logger, True)
        # add tests
        # @TODO
        dbsession.rollback()

    @unittest.skip('Skip testLookForUpdateProcess')
    def testLookForUpdateProcess(self):
        print "--------------------------------------------------------------------------------------------"
        print "\n"
        print "Test if testLookForUpdateProcess runs correctly ..."
        dbsession = loadDbSession(DBCONFIG_PARAMS, self.logger)
        response = lookForUpdateProcess(dbsession, self.logger, True)
        # add tests
        # @TODO
        dbsession.rollback()

    #@unittest.skip('Skip testLookForUpdateProcess_Infinity')
    def testLookForUpdateProcess_Infinity(self):
        print "--------------------------------------------------------------------------------------------"
        print "\n"
        print "Test if testLookForUpdateProcess_Infinity runs correctly ..."
        dbsession = loadDbSession(DBCONFIG_PARAMS, self.logger)
        # NOTE(review): intentional endless polling loop — and it COMMITS,
        # unlike the other cases which roll back.
        while True:
            print "New loop run ..."
            lookForUpdateProcess(dbsession, self.logger, True)
            dbsession.commit()
            print "Long sleep ..."
            time.sleep(10)
        # add tests
        # @TODO
if __name__ == "__main__":
    # Runs the suite; note the active infinity test never terminates.
    #import sys;sys.argv = ['', 'Test.testName']
    unittest.main()
import sys
from django.http import JsonResponse
from django.views import View
from django.db.models import Sum, Q
from .models import Product, ProductSize, Image, ProductContent, Category, Country
class ProductCategories(View):
    """Read-only listing of all product categories and origin countries."""

    def get(self, request):
        """Return every Category and Country as id/name/imageUrl dicts."""
        # Loop variables renamed from `object`, which shadowed the builtin.
        result = {
            'categories' : [
                {
                    'id' : category.id,
                    'name' : category.name,
                    'imageUrl' : category.image_url
                } for category in Category.objects.all()
            ],
            'countries' : [
                {
                    'id' : country.id,
                    'name' : country.name,
                    'imageUrl' : country.image_url
                } for country in Country.objects.all()
            ]
        }
        return JsonResponse({'productCategories' : result}, status=200)
class ProductDetails(View):
    def get(self, request, products_id):
        """Full detail payload for one product: sizes/prices, images and
        ingredient composition.

        NOTE(review): Product.objects.get raises DoesNotExist (=> 500)
        for unknown ids — consider a 404 guard.
        """
        product = Product.objects.get(id=products_id)
        images = Image.objects.filter(product_id=product.id)
        product_contents = ProductContent.objects.filter(product_id=product.id)
        product_sizes = ProductSize.objects.filter(product_id=product.id)
        result={
            'id' : product.id,
            'categoryId' : product.category.id,
            'category' : product.category.name,
            'name' : product.name,
            'description' : product.description,
            'country' : product.country.name,
            'countryId' : product.country.id,
            'color' : product.color,
            # Price and stock per available size.
            'priceAndSize' : [{'sizeId': product_size.size.id, 'sizeName' : product_size.size.name, 'price' : product_size.price, 'stock' : product_size.stock} for product_size in product_sizes],
            'image' : [image.url for image in images],
            # Ingredient composition as name/percent pairs.
            'productSubstance' : [{'name' : product_content.content.name, 'value': product_content.percent} for product_content in product_contents]
        }
        return JsonResponse({'productDetails' : result}, status=200)
class ProductListInfo(View):
    def get(self, request):
        """Filtered product listing.

        Optional query params: catch, color, priceMax, priceMin,
        country, category.  Price filtering is applied against the
        size_id=3 ("small") ProductSize row.
        """
        catch = request.GET.get('catch', None)
        color = request.GET.get('color', None)
        price_max = request.GET.get('priceMax', sys.maxsize)
        price_min = request.GET.get('priceMin', 0)
        country_id = request.GET.get('country', None)
        category_id = request.GET.get('category', None)
        q = Q()
        if country_id:
            q.add(Q(country_id=country_id), q.AND)
        if category_id:
            # Category 6 appears to hold cross-category items matched by
            # name prefix — NOTE(review): confirm this special case.
            pattern_identifier = Category.objects.get(id=category_id).name
            q.add(Q(category_id=category_id), q.AND)
            q.add(Q(category_id=6, name__istartswith=pattern_identifier), q.OR)
        if catch:
            q.add(Q(catch_code=catch), q.AND)
        if color:
            q.add(Q(color=color), q.AND)
        q.add(Q(productsize__price__range=(price_min, price_max)), q.AND)
        q.add(Q(productsize__size_id=3), q.AND)
        result = [
            {
                'id' : product.id,
                'name' : product.name,
                'catchCode' : product.catch_code,
                'countryId' : product.country.id,
                'categoryId' : product.category.id,
                'price' : product.productsize_set.filter(size_id=3).first().price, # size_id=3 is the cheapest (small) size
                'thumbNail' : product.image_set.filter(product_id=product.id).first().url,
                'stock' : product.productsize_set.filter(product_id=product.id).aggregate(Sum('stock'))['stock__sum']
            } for product in Product.objects.filter(q)
        ]
        return JsonResponse({'productListInfo' : result}, status=200)
class ProductList(View):
    """Newest-10 product listing for the main page."""

    def get(self,request):
        """Return the ten most recently created products with their
        small-size (size_id=3) price, first image and total stock."""
        # Loop variable renamed from `object`, which shadowed the builtin.
        result = [
            {
                'id' : product.id,
                'name' : product.name,
                'price' : product.productsize_set.filter(size_id=3).first().price,
                'thumbNail' : product.image_set.all().order_by('id').first().url,
                'catchCode' : product.catch_code,
                'stock' : product.productsize_set.aggregate(Sum('stock'))['stock__sum']
            } for product in Product.objects.all().order_by('-created_at')[:10]
        ]
        return JsonResponse({"productList":result},status = 200)
|
import torch.nn.functional as F
import torch.nn as nn
import torch
import numpy
from locked_dropout import LockedDropout
class LayerNorm(nn.Module):
    """Layer normalisation over the last axis with learnable per-feature
    scale (gamma) and shift (beta)."""

    def __init__(self, features, eps=1e-6):
        super(LayerNorm, self).__init__()
        self.gamma = nn.Parameter(torch.ones(features))
        self.beta = nn.Parameter(torch.zeros(features))
        self.eps = eps  # numerical guard against a zero std

    def forward(self, x):
        """Normalise x along its last axis, then rescale and shift."""
        mu = x.mean(-1, keepdim=True)
        sigma = x.std(-1, keepdim=True)
        normalised = (x - mu) / (sigma + self.eps)
        return self.gamma * normalised + self.beta
class LinearDropConnect(nn.Linear):
    """nn.Linear with DropConnect-regularised weights.

    During training, forward uses the cached masked weights `_weight`;
    call sample_mask() (or forward(..., sample_mask=True)) to draw a
    fresh mask.  At evaluation time the expected weight
    `weight * (1 - dropout)` is used instead.
    """

    def __init__(self, in_features, out_features, bias=True, dropout=0.):
        super(LinearDropConnect, self).__init__(
            in_features=in_features,
            out_features=out_features,
            bias=bias
        )
        self.dropout = dropout

    def sample_mask(self):
        """Draw a Bernoulli mask over the weights and cache the masked copy."""
        if self.dropout == 0.:
            self._weight = self.weight
            return
        drop = self.weight.new_empty(self.weight.size(), dtype=torch.uint8)
        drop.bernoulli_(self.dropout)  # 1 => zero out that weight
        self._weight = self.weight.masked_fill(drop.bool(), 0.)

    def forward(self, input, sample_mask=False):
        if not self.training:
            # Inference: expectation of the masked weights.
            return F.linear(input, self.weight * (1 - self.dropout), self.bias)
        if sample_mask:
            self.sample_mask()
        return F.linear(input, self._weight, self.bias)
def cumsoftmax(x, dim=-1):
    """Cumulative softmax (cumax): running sum of softmax(x) along dim."""
    return F.softmax(x, dim=dim).cumsum(dim=dim)


def softmax(x, dim=-1):
    """Thin alias for F.softmax with a last-axis default."""
    return F.softmax(x, dim=dim)


def cum(x, dim=-1):
    """Thin alias for cumulative sum with a last-axis default."""
    return x.cumsum(dim=dim)
class ONLSTMCell(nn.Module):
    """Ordered-Neurons LSTM cell.

    The hidden/cell state is organised into n_chunk chunks of chunk_size
    units.  Master input/forget gates, activated with a cumulative
    softmax, impose a soft ordering over chunks; their mean activations
    are returned as per-step "distance" signals.  When wds != 'no', an
    extra DropConnect projection (fwh) of the raw master-forget logits
    yields a second, weighted distance signal.
    """

    def __init__(self, input_size, hidden_size, chunk_size, wds='no', dropconnect=0.):
        super(ONLSTMCell, self).__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.chunk_size = chunk_size
        self.n_chunk = int(hidden_size / chunk_size)
        # Input projection: 4 standard gate blocks + 2 master-gate logit blocks.
        self.ih = nn.Sequential(
            nn.Linear(input_size, 4 * hidden_size + self.n_chunk * 2, bias=True),
            # LayerNorm(3 * hidden_size)
        )
        # Hidden projection, DropConnect-regularised.
        self.hh = LinearDropConnect(hidden_size, hidden_size * 4 + self.n_chunk * 2, bias=True, dropout=dropconnect)
        # self.c_norm = LayerNorm(hidden_size)
        # self.fwh = LinearDropConnect(self.n_chunk, self.n_chunk, bias=True, dropout=dropconnect)
        # self.drop_weight_modules = [self.hh,self.fwh]
        self.wds = wds
        if self.wds != 'no':
            self.fwh = LinearDropConnect(self.n_chunk, self.n_chunk, bias=True, dropout=dropconnect)
            self.drop_weight_modules = [self.hh, self.fwh]
        else:
            self.drop_weight_modules = [self.hh]
        # self.wds = wds
        # if self.wds != 'no':
        #     self.weighted_sd_vector = nn.Parameter(torch.zeros(self.n_chunk))

    def forward(self, input, hidden,
                transformed_input=None):
        """One time step.

        input may be None when transformed_input (= self.ih(input),
        precomputed for the whole sequence) is supplied.  hidden is
        (hx, cx) with cx of shape (batch, n_chunk, chunk_size).
        Returns (hy, cy, distances); distances is a 4-tuple of
        per-example scalars: (forget distance, weighted forget distance,
        input distance, input distance repeated).
        """
        hx, cx = hidden
        if transformed_input is None:
            transformed_input = self.ih(input)
        gates = transformed_input + self.hh(hx)
        # First 2*n_chunk units are the master-gate logits.
        cingate_raw, cforgetgate_raw = gates[:, :self.n_chunk * 2].chunk(2, 1)
        outgate, cell, ingate, forgetgate = gates[:, self.n_chunk * 2:].view(-1, self.n_chunk * 4,
                                                                             self.chunk_size).chunk(4, 1)
        # Master gates via cumulative softmax over the chunk axis.
        cingate = 1. - cumsoftmax(cingate_raw)
        distance_cin = cingate.sum(dim=-1) / self.n_chunk
        cforgetgate = cumsoftmax(cforgetgate_raw)
        distance_cforget = 1. - cforgetgate.sum(dim=-1) / self.n_chunk
        if self.wds != 'no':
            # Weighted distance from the extra fwh projection.
            c_w_forgetgate = cumsoftmax(self.fwh(cforgetgate_raw))
            distance_w_cforget = 1. - c_w_forgetgate.sum(dim=-1) / self.n_chunk
        else:
            distance_w_cforget = distance_cforget
        # Broadcast master gates across each chunk's units.
        cingate = cingate[:, :, None]
        cforgetgate = cforgetgate[:, :, None]
        ingate = torch.sigmoid(ingate)
        forgetgate = torch.sigmoid(forgetgate)
        cell = torch.tanh(cell)
        outgate = torch.sigmoid(outgate)
        # cy = cforgetgate * forgetgate * cx + cingate * ingate * cell
        # Combine master and standard gates; outside the overlap region the
        # master gates act alone.
        overlap = cforgetgate * cingate
        forgetgate = forgetgate * overlap + (cforgetgate - overlap)
        ingate = ingate * overlap + (cingate - overlap)
        cy = forgetgate * cx + ingate * cell
        # hy = outgate * F.tanh(self.c_norm(cy))
        hy = outgate * torch.tanh(cy)
        # self.last = [transformed_input, cforgetgate, weight, distance_cforget,hy,cy]
        # if self.wds != 'no':
        #     # return hy.view(-1, self.hidden_size), cy ,(origin_distance_cforget, distance_cforget, distance_cin,self.weighted_sd_vector)
        # else:
        return hy.view(-1, self.hidden_size), cy ,(distance_cforget, distance_w_cforget, distance_cin,distance_cin)

    def init_hidden(self, bsz):
        """Zero (hidden, cell) state for a batch of bsz examples."""
        weight = next(self.parameters()).data
        return (weight.new(bsz, self.hidden_size).zero_(),
                weight.new(bsz, self.n_chunk, self.chunk_size).zero_())

    def sample_masks(self):
        """Resample DropConnect masks; called once per sequence while training."""
        for m in self.drop_weight_modules:
            m.sample_mask()
class ONLSTMStack(nn.Module):
    """Stack of ONLSTMCells with locked (variational) dropout between
    layers.  l4d selects the layer whose weighted-distance vectors are
    collected into the auxiliary output tuple."""

    def __init__(self, layer_sizes, chunk_size, l4d=0,wds='no', dropout=0., dropconnect=0.):
        super(ONLSTMStack, self).__init__()
        self.cells = nn.ModuleList([ONLSTMCell(layer_sizes[i],
                                               layer_sizes[i + 1],
                                               chunk_size,
                                               wds=wds,
                                               dropconnect=dropconnect)
                                    for i in range(len(layer_sizes) - 1)])
        self.lockdrop = LockedDropout()
        self.dropout = dropout
        self.sizes = layer_sizes
        self.l4d = l4d

    def init_hidden(self, bsz):
        """Zero states for every layer."""
        return [c.init_hidden(bsz) for c in self.cells]

    def forward(self, input, hidden):
        """Run a full (length, batch, dim) sequence through every layer.

        Returns (output, final states, raw per-layer outputs, dropped
        per-layer outputs, distance tensors).  NOTE(review): if l4d is
        not a valid layer index, weighted_sd_vector stays empty and the
        final torch.stack raises.
        """
        length, batch_size, _ = input.size()
        if self.training:
            # Fresh DropConnect masks once per sequence (variational style).
            for c in self.cells:
                c.sample_masks()
        prev_state = list(hidden)
        prev_layer = input
        raw_outputs = []
        outputs = []
        distances_forget = []
        origin_distances_forget = []
        distances_in = []
        weighted_sd_vector=[]
        for l in range(len(self.cells)):
            curr_layer = [None] * length
            dist = [None] * length
            # Precompute the input projection for the whole sequence at once.
            t_input = self.cells[l].ih(prev_layer)
            for t in range(length):
                hidden, cell, d = self.cells[l](
                    None, prev_state[l],
                    transformed_input=t_input[t]
                )
                prev_state[l] = hidden, cell  # overwritten every timestep
                curr_layer[t] = hidden
                dist[t] = d
            prev_layer = torch.stack(curr_layer)
            origin_dist_cforget, dist_cforget, dist_cin, wsd_vector = zip(*dist)
            origin_dist_layer_cforget = torch.stack(origin_dist_cforget)
            dist_layer_cforget = torch.stack(dist_cforget)
            dist_layer_cin = torch.stack(dist_cin)
            wsd_layer_vector = torch.stack(wsd_vector)
            raw_outputs.append(prev_layer)
            if l < len(self.cells) - 1:
                # No inter-layer dropout after the topmost layer.
                prev_layer = self.lockdrop(prev_layer, self.dropout)
            outputs.append(prev_layer)
            distances_forget.append(dist_layer_cforget)
            origin_distances_forget.append(origin_dist_layer_cforget)
            distances_in.append(dist_layer_cin)
            if l == self.l4d:
                weighted_sd_vector.append(wsd_layer_vector)
        output = prev_layer
        # print(self.cells[2].weighted_vector[0])
        return output, prev_state, raw_outputs, outputs, (torch.stack(origin_distances_forget),torch.stack(distances_forget), torch.stack(distances_in), torch.stack(weighted_sd_vector))
if __name__ == "__main__":
    # Smoke test: 2-cell stack on random (length=10, batch=10, dim=10)
    # input; prints the final per-layer (hidden, cell) states.
    x = torch.Tensor(10, 10, 10)
    x.data.normal_()
    lstm = ONLSTMStack([10, 10, 10], chunk_size=10)
    print(lstm(x, lstm.init_hidden(10))[1])
|
import os
# setlist.fm REST API root.
BASE_URL = "https://api.setlist.fm/rest/1.0"
# Spotify accounts service (OAuth endpoints).
API_BASE = 'https://accounts.spotify.com'
# Must match the redirect URI registered with the Spotify application.
REDIRECT_URI = "http://localhost:5000/api_callback"
# OAuth scopes: manage playlists and read the user's top items.
SCOPE = 'playlist-modify-private,playlist-modify-public,user-top-read'
# Secrets come from the environment, never from source control.
API_KEY = os.environ.get('SETLIST_API_KEY')
SPOTIFY_CLIENT_ID = os.environ.get('SPOTIFY_CLIENT_ID')
SPOTIFY_CLIENT_SECRET = os.environ.get('SPOTIFY_CLIENT_SECRET')
REDIS_URL = os.environ.get('REDIS_URL')
|
import struct
from common.exception import MessageHeaderError
from common.constants import ERR_MSG_HDR_BAD_MSG_LEN
class KeepAlive(object):
    """BGP KEEPALIVE message: a bare 19-byte header of message type 4."""

    MSG_KEEPALIVE = 4

    @staticmethod
    def parse(msg):
        """Validate a received KEEPALIVE body; it must be non-empty here
        (an empty body is reported as a bad-length header error)."""
        if not msg:
            raise MessageHeaderError(sub_error=ERR_MSG_HDR_BAD_MSG_LEN,data='')

    @staticmethod
    def construct_header():
        """
        Build the 19-byte message header for a KEEPALIVE.

        # 16-octet      2-octet  1-octet
        # ---------------+--------+---------+------+
        # Marker         | Length | Type    | msg  |
        # ---------------+--------+---------+------+
        TYPE: 1=OPEN, 2=UPDATE, 3=NOTIFICATION, 4=KEEPALIVE
        """
        marker = b'\xff' * 16
        length_and_type = struct.pack("!HB", 19, 4)
        return marker + length_and_type

    def construct(self):
        """A KEEPALIVE has no body, so the message is just its header."""
        return self.construct_header()
|
# Demonstrates print()'s sep/end parameters and basic input() usage.
print("rajeev", 5)
# separator between arguments, end can change print ending
print("rahul ", " king", sep="@", end="")
print(" and great")
# input function
# returns string of characters
name = input("Enter your name : ")
print(name)
year = input("In what year were you born? ")
print(type(year))  # input() always returns str, even for numeric answers
# use split() method to capture multiple pieces of information separated by spaces.
reply = input("Enter x and y, separated by spaces: ")
print(reply.split())
class Drone(object):
    """Drone

    Virtual representation of a drone bot to help with logic processing.

    The original source listed each state's bookkeeping fields as bare
    names inside the list (a syntax error — presumably comment markers
    were lost); they are preserved here as comments.
    """
    states = [
        'Idle',            # duration
        'Deploying',       # complete, duration
        'Searching',       # tracking, duration
        'Relocating',      # duration
        'Attacking',       # duration, distance, prey_tracker_state, captured
        'SearchingNest',   # tracking, duration
        'Returning',       # duration, tracking, complete
        'Delivering',      # duration
        'Disconnected',    # duration
    ]
    msg_types = [
        'Discover',
        'DiscoverResponse',
        'Heartbeat',
        'IdleResponse',
        'DeployingResponse',
        'SearchingResponse',
        'TrackingResponse',
        'AttackingResponse',
        'CapturingResponse',
        'ReturningResponse',
        'DeliveringResponse',
        'DisconnectedResponse',
    ]

    def __init__(self, drone_id):
        """Create a drone proxy with empty sensing/capture state."""
        self.id = drone_id
        self.prey_captured = None  # whether prey is currently held
        self.target_sight = None   # last known target sighting
        self.hive_sight = None     # last known nest/hive sighting
|
import pygame
'''
@class Sprite
@abstract
'''
class Sprite:
    """Abstract base class for drawable game objects.

    Subclasses override onDraw/onStep; the defaults do nothing.
    """

    def __init__(self):
        pass

    def onDraw(self, screen):
        """Render this Sprite onto the given pygame Surface."""
        pass

    def onStep(self):
        """Advance this Sprite by one simulation step."""
        pass
import copy
import random
class Assembler():
    """Greedy fragment assembler (Python 2 code: iteritems, print
    statements, indexing dict.keys()).

    Assumes every adjacent fragment pair overlaps by more than half the
    longer fragment's length; overlaps are located, fragments ordered,
    then spliced into one sequence.
    """

    def __init__(self,fragments):
        # Index fragments by position; store each as a list of chars so
        # slices can be assigned in assemble().
        self.fragments = {}
        for idx, fragment in enumerate(fragments):
            self.fragments[idx] = list(fragment)

    def _fragment_matcher(self, top_fragment, bottom_fragment):
        '''
        compares two fragments and determines match

        Returns the offset in top_fragment where bottom_fragment's
        prefix (min_match_length chars) aligns, or False.
        NOTE(review): a genuine match at offset 0 is indistinguishable
        from "no match" for callers testing `if match_index:`.
        '''
        top_len = len(top_fragment)
        bottom_len = len(bottom_fragment)
        # Overlap must exceed half the longer fragment (py2 int division).
        min_match_length = (max(top_len, bottom_len) / 2) + 1
        match_found = False
        bottom_string = ''.join(bottom_fragment[0:min_match_length])
        x = 0
        x_stop = x + min_match_length
        while x_stop <= top_len:
            top_string = top_fragment[x: x_stop]
            if ''.join(top_string) == bottom_string:
                match_found = True
                return x
            x +=1
            x_stop +=1
        return False

    def _find_matching_fragment_pairs(self):
        '''
        compares

        Tests every ordered fragment pair.  map_top_bottom maps a
        fragment id to the id/offset of the fragment overlapping its
        tail; map_bottom_top is the reverse index.
        '''
        map_top_bottom = {}
        map_bottom_top = {}
        for ID_top, top_fragment in self.fragments.iteritems():
            for ID_bottom, bottom_fragment in self.fragments.iteritems():
                if ID_top != ID_bottom:
                    match_index = self._fragment_matcher(top_fragment, bottom_fragment)
                    if match_index:
                        map_top_bottom[ID_top] = {
                            'ID_bottom_match' : ID_bottom,
                            'match_index' : match_index
                        }
                        map_bottom_top[ID_bottom] = {
                            'ID_top_match' : ID_top
                        }
        # print 'TOP_BOTTOM'
        # for k,v in map_top_bottom.iteritems():
        #     print k,v
        #
        # print 'BOTTOM_TOP'
        # for k,v in map_bottom_top.iteritems():
        #     print k,v
        return map_top_bottom, map_bottom_top

    def _determine_order(self, map_top_bottom, map_bottom_top):
        '''
        determines the order of the sequences using the matching pairs info
        '''
        order = []
        # Seed with a random fragment plus one known neighbour.
        ID = random.choice(self.fragments.keys())
        if ID in map_bottom_top:
            order = [ map_bottom_top[ID]['ID_top_match'], ID ]
        else:
            order = [ ID, map_top_bottom[ID]['ID_bottom_match']]
        # Grow the chain at either end until all fragments are placed.
        # NOTE(review): loops forever if neither end can be extended.
        while len(order) < len(self.fragments): # is this one hacky?
            end = order[-1]
            start = order[0]
            if end in map_top_bottom:
                order.append(map_top_bottom[end]['ID_bottom_match'])
            elif start in map_bottom_top:
                order.insert(0, map_bottom_top[start]['ID_top_match'])
        print 'order', order
        return order

    def assemble(self):
        '''
        assembles the sequence given the info on the pairs and the order
        '''
        map_top_bottom, map_bottom_top = self._find_matching_fragment_pairs()
        order = self._determine_order(map_top_bottom, map_bottom_top)
        sequence = self.fragments[order[0]]
        insert_index = 0
        print ''.join(sequence)
        for ID in order[:-1]: # last fragment ID is not in map_top_bottom
            ID_bottom_match = map_top_bottom[ID]['ID_bottom_match']
            match_index = map_top_bottom[ID]['match_index']
            # Each successive fragment starts match_index chars into the previous one.
            insert_index += match_index
            # print '----'
            # print ''.join(sequence[insert_index: insert_index + 5])
            # print ''.join(self.fragments[ID_bottom_match][0:5])
            sequence[insert_index:] = self.fragments[ID_bottom_match]
            print '_'*insert_index + ''.join(self.fragments[ID_bottom_match])
        # Final result
        return ''.join(sequence)
|
#
# Converted to Python by Eric Shen <ericshen@berkeley.edu>
#
#
import cv2
import numpy as np
import os
import argparse
import logging
# Timestamped debug logging to stderr; prefix every message with TAG.
log_format = '%(created)f:%(levelname)s:%(message)s'
logging.basicConfig(level=logging.DEBUG, format=log_format) # log to file filename='example.log',
TAG = "laplace-recog:"
def main(capture, sandbox_send, sandbox_recv, files):
    """Read frames from `capture` until exhausted, Laplacian-filter each
    colour plane, and send (frame_num, filtered_frame) pairs through
    `sandbox_send.send_pyobj`.  `sandbox_recv` and `files` are unused here.
    """
    logging.debug(TAG + "inside main")
    frame_num = 0
    while True:
        logging.debug(TAG + "before reading frame")
        retval, frame = capture.read()
        if not retval:
            break
        logging.debug(TAG + "after reading frame")
        frame_num += 1
        planes = cv2.split(frame)
        for i in range(len(planes)):
            # NOTE(review): cv2.Laplacian's second positional arg is ddepth;
            # 10 looks like a leftover pre-cv2 depth constant — confirm.
            laplace = cv2.Laplacian(planes[i], 10)
            planes[i] = cv2.convertScaleAbs(laplace)
        colorlaplace = cv2.merge(planes)
        logging.debug(TAG + "sending obj:num %d" % frame_num)
        sandbox_send.send_pyobj((frame_num, colorlaplace))
|
from __future__ import print_function
from __future__ import division
import sys
import time
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import BasicLSTMCell, GRUCell
import properties as p
class ModelSentiment():
    def __init__(self, word_embedding=None, max_input_len=None, using_compression=False, book=None, words=None, we_trainable=False, \
            learning_rate = 0.001, lr_decayable=True, using_bidirection=False, fw_cell='basic', bw_cell='gru'):
        """Store hyper-parameters; the graph itself is built by init_ops().

        word_embedding: pretrained matrix (ignored when using_compression);
        book/words: code-book arrays for the compressed-embedding path.
        NOTE(review): lr_decayable, using_bidirection and the cell types
        are stored but unused in the methods visible here — confirm.
        """
        self.word_embedding = word_embedding
        self.we_trainable = we_trainable
        self.max_input_len = max_input_len
        self.using_compression = using_compression
        self.book = book
        self.words = words
        self.learning_rate = learning_rate
        self.lr_decayable = lr_decayable
        self.using_bidirection = using_bidirection
        self.fw_cell = fw_cell
        self.bw_cell = bw_cell
    def set_data(self, train, valid):
        """Attach the training and validation datasets."""
        self.train = train
        self.valid = valid
def set_embedding(self):
self.word_embedding = word_embedding
    def init_ops(self):
        """Build the full graph on p.device: placeholders, forward pass,
        predictions, loss, train op and merged summaries."""
        with tf.device('/%s' % p.device):
            # init memory
            self.add_placeholders()
            # init model
            self.output = self.inference()
            # init prediction step
            self.pred = self.get_predictions(self.output)
            # init cost function
            self.calculate_loss = self.add_loss_op(self.output)
            # init gradient
            self.train_step = self.add_training_op(self.calculate_loss)
            self.merged = tf.summary.merge_all()
    def add_placeholders(self):
        """add data placeholder to graph """
        # Token-id inputs, padded to max_input_len.
        self.input_placeholder = tf.placeholder(tf.int32, shape=(
            p.batch_size, self.max_input_len))
        # True (unpadded) lengths — unused in the visible graph.
        self.input_len_placeholder = tf.placeholder(
            tf.int32, shape=(p.batch_size,))
        # Sentiment class labels.
        self.pred_placeholder = tf.placeholder(
            tf.int32, shape=(p.batch_size,))
        # place holder for start vs end position
        # Dropout keep-probability and global step counter.
        self.dropout_placeholder = tf.placeholder(tf.float32)
        self.iteration = tf.placeholder(tf.int32)
def inference(self):
"""Performs inference on the DMN model"""
# set up embedding
embeddings = None
if not self.using_compression:
embeddings = tf.Variable(
self.word_embedding.astype(np.float32), name="Embedding", trainable=self.we_trainable)
with tf.variable_scope("input", initializer=tf.contrib.layers.xavier_initializer()):
print('==> get input representation')
word_reps = self.get_input_representation(embeddings)
word_reps = tf.reduce_mean(word_reps, axis=1)
# print(word_reps)
with tf.variable_scope("hidden", initializer=tf.contrib.layers.xavier_initializer()):
# output = tf.layers.dense(word_reps,
# p.embed_size,
# activation=tf.nn.tanh,
# name="h1")
output = tf.layers.dense(word_reps,
p.hidden_size,
activation=tf.nn.tanh,
name="h2")
output = tf.nn.dropout(output, self.dropout_placeholder)
output = tf.layers.dense(output,
p.sentiment_classes,
name="fn")
return output
def build_book(self):
b = tf.Variable(self.book, name="book", trainable=False)
w = tf.Variable(self.words, name="words", trainable=False)
return b, w
def get_input_representation(self, embeddings):
"""Get fact (sentence) vectors via embedding, positional encoding and bi-directional GRU"""
inputs = None
if self.using_compression:
b_embedding, w_embedding = self.build_book()
# from code words => build one hot
# B x L x M: batchsize x length_sentence
d = tf.nn.embedding_lookup(w_embedding, self.input_placeholder)
# d_ is flatten to make one hot vector then reshape to cube later
d_ = tf.reshape(d, [-1])
# => B x L x M x K
d_ = tf.one_hot(d_, depth=p.code_size, axis=-1)
d_ = tf.reshape(d_, [p.batch_size * self.max_input_len, p.book_size, p.code_size]);
# => M x B * L x K => B * L x K
inputs = tf.reduce_sum(tf.matmul(tf.transpose(d_, perm=[1, 0, 2]), b_embedding), axis=0);
inputs = tf.reshape(tf.reshape(inputs, [-1]), [p.batch_size, self.max_input_len, p.embed_size])
else:
# get word vectors from embedding
inputs = tf.nn.embedding_lookup(embeddings, self.input_placeholder)
# chunking_len = int(self.max_input_len / p.fixation)
# inputs = tf.reshape(tf.reshape(inputs, [-1]), [p.batch_size, chunking_len, p.fixation * p.embed_size])
# use encoding to get sentence representation plus position encoding
# (like fb represent)
if self.fw_cell == 'basic':
fw_cell = BasicLSTMCell(p.embed_size)
else:
fw_cell = GRUCell(p.embed_size)
if not self.using_bidirection:
# outputs with [batch_size, max_time, cell_bw.output_size]
outputs, _ = tf.nn.dynamic_rnn(
fw_cell,
inputs,
dtype=np.float32,
sequence_length=self.input_len_placeholder,
)
else:
if self.bw_cell == 'basic':
back_cell = BasicLSTMCell(p.embed_size)
else:
back_cell = GRUCell(p.embed_size)
outputs, _ = tf.nn.bidirectional_dynamic_rnn(
fw_cell,
back_cell,
inputs,
dtype=np.float32,
sequence_length=self.input_len_placeholder,
)
outputs = tf.concat(outputs, 2)
return outputs
def add_loss_op(self, output):
"""Calculate loss"""
loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=output, labels=self.pred_placeholder))
# add l2 regularization for all variables except biases
for v in tf.trainable_variables():
if not 'bias' in v.name.lower():
loss += p.l2 * tf.nn.l2_loss(v)
tf.summary.scalar('loss', loss)
return loss
def add_training_op(self, loss):
"""Calculate and apply gradients"""
if self.lr_decayable:
lr = tf.train.exponential_decay(learning_rate=p.lr, global_step=self.iteration, decay_steps=p.lr_depr, decay_rate=p.decay_rate)
else:
lr = self.learning_rate
opt = tf.train.AdamOptimizer(learning_rate=lr)
gvs = opt.compute_gradients(loss)
train_op = opt.apply_gradients(gvs)
return train_op
def get_predictions(self, output):
pred = tf.nn.softmax(output)
pred = tf.argmax(pred, 1)
return pred
def run_epoch(self, session, data, num_epoch=0, train_writer=None, train_op=None, verbose=2, train=False):
dp = p.dropout
if train_op is None:
train_op = tf.no_op()
dp = 1
total_steps = len(data[0]) // p.batch_size
total_loss = []
accuracy = 0
# shuffle data
r = np.random.permutation(len(data[0]))
ct, ct_l, pr = data
ct, ct_l, pr = np.asarray(ct, dtype=np.float32), np.asarray(ct_l, dtype=np.float32), np.asarray(pr, dtype=np.float32)
ct, ct_l, pr = ct[r], ct_l[r], pr[r]
for step in range(total_steps):
index = range(step * p.batch_size,
(step + 1) * p.batch_size)
feed = {self.input_placeholder: ct[index],
self.input_len_placeholder: ct_l[index],
self.pred_placeholder: pr[index],
self.dropout_placeholder: dp,
self.iteration: num_epoch}
pred_labels = pr[step * p.batch_size:(step + 1) * p.batch_size]
loss, pred, summary, _ = session.run(
[self.calculate_loss, self.pred, self.merged, train_op], feed_dict=feed)
if train_writer is not None:
train_writer.add_summary(
summary, num_epoch * total_steps + step)
accuracy += (np.sum(pred == pred_labels)) / float(len(pred_labels))
total_loss.append(loss)
if verbose and step % verbose == 0:
sys.stdout.write('\r{} / {} : loss = {}'.format(
step, total_steps, np.mean(total_loss)))
sys.stdout.flush()
if verbose:
sys.stdout.write('\r')
avg_acc = 0.
if total_steps:
avg_acc = accuracy / float(total_steps)
return np.sum(total_loss), avg_acc
|
# Generated by Django 2.2.3 on 2019-08-19 19:03
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``endereco_completo`` (full address) text column
    to the ``modelblacklist`` table."""

    dependencies = [
        ('blacklist', '0003_auto_20190813_1039'),
    ]

    operations = [
        migrations.AddField(
            model_name='modelblacklist',
            name='endereco_completo',
            # Optional free-text field, up to 250 characters, NULL allowed.
            field=models.CharField(blank=True, max_length=250, null=True),
        ),
    ]
|
# stdlib
from typing import Optional
# relative
from .....common.message import ImmediateSyftMessageWithoutReply
from .....common.serde.serializable import serializable
from .....common.uid import UID
@serializable(recursive_serde=True)
class RegisterChildNodeMessage(ImmediateSyftMessageWithoutReply):
    """One-way message a child node sends to register itself with a parent."""

    # Attributes included when the message is serialized.
    __attr_allowlist__ = ["lookup_id", "child_node_client_address", "address", "id"]

    def __init__(
        self,
        lookup_id: UID,
        child_node_client_address: UID,
        address: UID,
        msg_id: Optional[UID] = None,
    ):
        """Record the routing identifiers and delegate to the base message."""
        super().__init__(address=address, msg_id=msg_id)
        self.child_node_client_address = child_node_client_address
        self.lookup_id = lookup_id
|
#
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
#
"""
Base class for remote file shares.
"""
import logging
from abc import ABCMeta, abstractmethod
from typing import Any, Dict, Optional
from mlos_bench.services.base_service import Service
from mlos_bench.services.types.fileshare_type import SupportsFileShareOps
_LOG = logging.getLogger(__name__)
class FileShareService(Service, SupportsFileShareOps, metaclass=ABCMeta):
    """
    Abstract base class for all remote file share services.
    """

    def __init__(self, config: Optional[Dict[str, Any]] = None,
                 global_config: Optional[Dict[str, Any]] = None,
                 parent: Optional[Service] = None):
        """
        Create a new file share service with the given configuration.

        Parameters
        ----------
        config : dict
            Free-format dictionary with the file share configuration.
            Passed as a constructor parameter of the class specified
            by `class_name`.
        global_config : dict
            Free-format dictionary of global parameters.
        parent : Service
            Parent service that can provide mixin functions.
        """
        super().__init__(config, global_config, parent)
        # Expose the transfer operations as registered service methods.
        exported_methods = [
            self.download,
            self.upload,
        ]
        self.register(exported_methods)

    @abstractmethod
    def download(self, remote_path: str, local_path: str, recursive: bool = True) -> None:
        """
        Download content from a remote share path to a local path.

        Parameters
        ----------
        remote_path : str
            Path on the remote file share: a file when recursive=False,
            a directory when recursive=True.
        local_path : str
            Local path to store the downloaded content under.
        recursive : bool
            If False, skip the subdirectories;
            if True (the default), download the whole directory tree.
        """
        _LOG.info("Download from File Share %s recursively: %s -> %s",
                  "" if recursive else "non", remote_path, local_path)

    @abstractmethod
    def upload(self, local_path: str, remote_path: str, recursive: bool = True) -> None:
        """
        Upload content from a local path to a remote share path.

        Parameters
        ----------
        local_path : str
            Local directory to upload contents from.
        remote_path : str
            Path on the remote file share to store the content under.
        recursive : bool
            If False, skip the subdirectories;
            if True (the default), upload the whole directory tree.
        """
        _LOG.info("Upload to File Share %s recursively: %s -> %s",
                  "" if recursive else "non", local_path, remote_path)
|
# Find the least common multiple (LCM / "EKOK") of two user-entered numbers
# and report it as "... ve ... sayılarının EKOKu = ....".
# HINT: first determine which of the two numbers is smaller and start there.


def en_kucuk_ortak_kat(sayi1, sayi2):
    """Return the least common multiple of two positive integers."""
    buyuk, kucuk = (sayi1, sayi2) if sayi1 > sayi2 else (sayi2, sayi1)
    carpan = 0
    # The LCM is the first multiple of the larger number that the
    # smaller number also divides.
    while True:
        carpan = carpan + 1
        ekok = carpan * buyuk
        if ekok % kucuk == 0:
            return ekok


def en_buyuk_ortak_bolen(sayi1, sayi2):
    """Return the greatest common divisor of two positive integers.

    Bug fix: the original looped over ``range(2, kucuksayı)``, which
    excludes the smaller number itself, so e.g. gcd(3, 6) came out as 1
    instead of 3; the range is now inclusive.  Floor division keeps the
    working values integers (the original used ``/`` and drifted into
    floats).
    """
    buyuk, kucuk = (sayi1, sayi2) if sayi1 > sayi2 else (sayi2, sayi1)
    ebob = 1
    for x in range(2, kucuk + 1):
        # Strip every shared factor x from both numbers.
        while buyuk % x == 0 and kucuk % x == 0:
            ebob = ebob * x
            buyuk //= x
            kucuk //= x
    return ebob


def main():
    """Prompt for two numbers and print their LCM and GCD (Turkish UI)."""
    sayı1 = int(input("bir sayı giriniz"))
    sayı2 = int(input("bir sayı daha giriniz"))
    print("{} ve {} sayılarının ekoku = {}".format(sayı1, sayı2, en_kucuk_ortak_kat(sayı1, sayı2)))
    print("{} ve {} sayılarının ebobu = {}".format(sayı1, sayı2, en_buyuk_ortak_bolen(sayı1, sayı2)))


if __name__ == "__main__":
    main()
from pymata_aio.constants import Constants
from Lib import Leonardo
import sys
import time
# Handle to the Arduino Leonardo board (project-local wrapper around
# pymata_aio); SERVO_PIN is the PWM pin the servo signal wire is attached to.
board = Leonardo.Leonardo()
SERVO_PIN = 10
def setup():
    """Configure SERVO_PIN as a servo output and move the horn to 0 degrees."""
    board.servo_config(SERVO_PIN)
    board.sleep(0.2)  # give the board time to apply the servo config
    board.analog_write(SERVO_PIN, 0)
    board.sleep(0.5)  # wait for the horn to reach the start position
def loop():
    """Sweep the servo from 0 to 180 degrees and back, in 5-degree steps."""
    print("Servo sweep ( 0 to 180 degree )")
    sys.stdout.flush()
    # The range of motion for some servos isn't all the way from 0 degrees to 180 degrees, change as needed.
    for pos in range(0, 180, 5):  # 0 up to 175 degrees, 5 degrees per step
        board.analog_write(SERVO_PIN, pos)
        board.sleep(0.1)  # short pause so the servo can track each step
    print("Servo sweep ( 180 to 0 degree )")
    sys.stdout.flush()
    for pos in range(180, 0, -5):  # 180 down to 5 degrees, -5 per step
        board.analog_write(SERVO_PIN, pos)
        board.sleep(0.1)
# Script entry point: configure the servo once, then sweep forever.
if __name__ == "__main__":
    setup()
    while True:
        loop()
|
import pytest
from polyglotdb import CorpusContext
def test_generate_hierarchy(acoustic_config):
    """generate_hierarchy() must reproduce the corpus' stored hierarchy."""
    with CorpusContext(acoustic_config) as corpus:
        generated = corpus.generate_hierarchy()
        assert generated._data == corpus.hierarchy._data
def test_generate_hierarchy_subannotations(subannotation_config):
    """Subannotations must survive a hierarchy regeneration round-trip."""
    with CorpusContext(subannotation_config) as corpus:
        generated = corpus.generate_hierarchy()
        assert generated._data == corpus.hierarchy._data
        assert generated.subannotations['phone'] == corpus.hierarchy.subannotations['phone']
|
import unittest
from entity.manufacturer import Manufacture
class ManufactureTestCase(unittest.TestCase):
    """Unit tests for the Manufacture name accessors."""

    def setUp(self) -> None:
        """Create a fresh Manufacture and a sample name for every test."""
        self.name = "Lol"
        self.manufacture = Manufacture()

    def test_name(self):
        """A name set with set_name must round-trip through get_name."""
        self.manufacture.set_name(self.name)
        self.assertEqual(self.name, self.manufacture.get_name())

    def test_name_type_exception(self):
        """A non-string name must raise TypeError (two equivalent idioms)."""
        # Context-manager form.
        with self.assertRaises(TypeError):
            self.manufacture.set_name(123)
        # Callable form.
        self.assertRaises(TypeError, self.manufacture.set_name, 123)
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
|
#!/usr/local/bin/python3
import unittest
from report_processor import ReportProcessor
# Full integration test that runs the latest two reports.
# class IntegrationTest(unittest.TestCase):
def test1():
    """Smoke test: process the latest reports from the public feed.

    NOTE(review): hits the live IESO site over HTTP.
    """
    url = "http://reports.ieso.ca/public/TxLimitsOutage0to2Days"
    reportProcessor = ReportProcessor(url)
    reportProcessor.main(1)
# Full integration test that run the scenario when there are additional lines in the old report.
def test2():
    """Scenario: the old report (v71) has lines the new one (v75) lacks."""
    dir_url = "http://reports.ieso.ca/public/TxLimitsOutage0to2Days"
    url1 = "http://reports.ieso.ca/public/TxLimitsOutage0to2Days/PUB_TxLimitsOutage0to2Days_20201009_v75.xml"
    url2 = "http://reports.ieso.ca/public/TxLimitsOutage0to2Days/PUB_TxLimitsOutage0to2Days_20201009_v71.xml"
    reportProcessor = ReportProcessor(dir_url)
    reportProcessor._process_reports_at(url1, url2)
# Full integration test that run the scenario when there are additional lines in the new report.
def test3():
    """Scenario: the new report (v67) has lines the old one (v63) lacks."""
    dir_url = "http://reports.ieso.ca/public/TxLimitsOutage0to2Days"
    url1 = "http://reports.ieso.ca/public/TxLimitsOutage0to2Days/PUB_TxLimitsOutage0to2Days_20201009_v67.xml"
    url2 = "http://reports.ieso.ca/public/TxLimitsOutage0to2Days/PUB_TxLimitsOutage0to2Days_20201009_v63.xml"
    reportProcessor = ReportProcessor(dir_url)
    reportProcessor._process_reports_at(url1, url2)
# Run the integration scenarios only when executed as a script.
# Fix: the calls used to run at import time, so merely importing this
# module fired three live network requests; the standard entry-point
# guard keeps imports side-effect free while `python <file>` behaves
# exactly as before.
if __name__ == "__main__":
    test1()
    test2()
    test3()
from typing import Callable
from random import choice
class Board:
    """Tic-Tac-Toe board: a flat list of 9 cells, '_' meaning empty.

    Bug fix: the cell list used to be a *class* attribute, so every
    Board instance shared (and mutated) the same list until ``clear()``
    was called; the cells now live on the instance.
    """

    # Winning index triples: rows, columns, diagonals.
    wins_combinations: ((int, int, int), ...) = (
        (0, 1, 2), (3, 4, 5), (6, 7, 8),  # rows
        (0, 3, 6), (1, 4, 7), (2, 5, 8),  # columns
        (0, 4, 8), (2, 4, 6),  # diagonals
    )

    def __init__(self) -> None:
        # Per-instance cell storage (was previously shared class state).
        self.__board = ['_', '_', '_',
                        '_', '_', '_',
                        '_', '_', '_']

    def __repr__(self) -> str:
        return f'''\t\t---------
\t\t| {self.__board[0]} {self.__board[1]} {self.__board[2]} |
\t\t| {self.__board[3]} {self.__board[4]} {self.__board[5]} |
\t\t| {self.__board[6]} {self.__board[7]} {self.__board[8]} |
\t\t---------'''

    def clear(self) -> None:
        """Reset board state."""
        self.__board = ['_', '_', '_', '_', '_', '_', '_', '_', '_']

    def __len__(self) -> int:
        """Return the number of empty cells."""
        return self.__board.count('_')

    def winner(self) -> str or None:
        """Return 'X wins' / 'O wins' / 'Draw' if the game is over, else None."""
        # Before the first 5 moves there can be no winner.
        if self.__len__() in (9, 8, 7, 6):
            return None
        # From move 5 on, check every winning combination.
        for w in self.wins_combinations:
            if all(self.__board[i] == 'X' for i in w):
                return 'X wins'
            if all(self.__board[i] == 'O' for i in w):
                return 'O wins'
        # No moves left and no winner -> draw.
        if self.__len__() == 0:
            return 'Draw'
        return None

    def __getitem__(self, i: int) -> str:
        return self.__board[i]

    def __setitem__(self, i: int, value: str) -> None:
        self.__board[i] = value

    def is_free(self, i: int) -> bool:
        """True if cell ``i`` holds no mark."""
        return self.__board[i] == '_'

    def __iter__(self):
        """Yield the cells in index order."""
        for i in self.__board:
            yield i
class User:
    """Human player: reads a move from standard input."""

    @staticmethod
    def move(_board: Board, current_turn='X') -> int:
        """Prompt for 'x y' coordinates (each 1..3) until a free cell is chosen.

        Returns the flat board index; (1, 1) maps to the bottom-left cell.
        """
        while True:
            try:
                x, y = [int(i) for i in input('Enter the coordinates: ').split()]
            except (NameError, ValueError):
                print('You should enter numbers!')
                continue
            if not 0 < x < 4 or not 0 < y < 4:
                print('Coordinates should be from 1 to 3!')
                continue
            # Convert (column x, row y) with a bottom-left origin into a
            # flat index with a top-left origin.
            cell: int = ((3 - y) * 3) + (x - 1)
            if not _board.is_free(cell):
                print('This cell is occupied! Choose another one!')
                continue
            return cell
class RandomAI:
    """AI that plays a uniformly random free cell (the "easy" level)."""

    @staticmethod
    def rand_move(_board: Board) -> int:
        """Return the index of a randomly chosen empty cell."""
        free_cells = [index for index, _ in enumerate(_board) if _board.is_free(index)]
        return choice(free_cells)

    def move(self, _board: Board, current_turn='X') -> int:
        """Announce the difficulty level and delegate to rand_move."""
        print('Making move level "easy"')
        return self.rand_move(_board=_board)
class MediumAI(RandomAI):
    """AI that wins or blocks in one move, otherwise plays randomly."""

    def move(self, _board: Board, current_turn='X') -> int:
        """Return a winning cell, a blocking cell, or a random free cell.

        Bug fix: the original indexed ``win_line.index('_')`` inside a
        bare ``except`` that silently swallowed the ValueError raised
        when a two-in-a-row line had no empty cell (and printed noise);
        the empty cell is now checked for explicitly.
        """
        print('Making move level "medium"')
        for win_combination in _board.wins_combinations:
            win_line = [_board[i] for i in win_combination]
            # Order: first check whether we can win, then whether we
            # must block the opponent.
            order = ('X', 'O') if current_turn == 'X' else ('O', 'X')
            for player in order:
                if win_line.count(player) == 2 and '_' in win_line:
                    return win_combination[win_line.index('_')]
        return self.rand_move(_board=_board)
class HardAI(MediumAI):
    """Perfect play via minimax with alpha-beta pruning.

    Scores are from O's perspective: O win = +1, draw = 0, X win = -1,
    so X is the minimizing player and O the maximizing one.
    """

    # Map a finished-game result string to a (score, dummy cell) pair.
    outcomes: {str: (int, int), } = {
        'X wins': (-1, 0),
        'O wins': (1, 0),
        'Draw': (0, 0),
    }

    def move(self, _board: Board, current_turn='X') -> int:
        """Return the optimal cell for the current player."""
        print('Making move level "hard"')
        if current_turn == 'X':
            # X minimizes the score, O maximizes it.
            _, cell = self.min(_board, -2, 2)
        else:
            _, cell = self.max(_board, -2, 2)
        return cell

    def max(self, _board, alpha, beta) -> (int, int):
        """Best (score, cell) for O, trying moves in place on the board."""
        # -1 - loss
        # 0 - draw
        # 1 - win
        maxv: int = -2  # below any reachable score
        cell: int = -1
        result = _board.winner()
        if result:
            return self.outcomes[result]
        for i, item in enumerate(_board):
            if item == '_':
                _board[i] = 'O'
                m, _ = self.min(_board, alpha, beta)
                if m > maxv:
                    maxv = m
                    cell = i
                _board[i] = '_'  # undo the trial move
                if maxv >= beta:
                    return maxv, cell  # beta cut-off
                if maxv > alpha:
                    alpha = maxv
        return maxv, cell

    def min(self, _board, alpha, beta) -> (int, int):
        """Best (score, cell) for X, trying moves in place on the board."""
        minv: int = 2  # above any reachable score
        cell: int = -1
        result = _board.winner()
        if result:
            return self.outcomes[result]
        for i, item in enumerate(_board):
            if item == '_':
                _board[i] = 'X'
                m, _ = self.max(_board, alpha, beta)
                if m < minv:
                    minv = m
                    cell = i
                _board[i] = '_'  # undo the trial move
                if minv <= alpha:
                    return minv, cell  # alpha cut-off
                if minv < beta:
                    beta = minv
        return minv, cell
def game(board=None,
         player1=None,  # the 'X' player
         player2=None,  # the 'O' player
         starts_game='X') -> str:
    """Play one game to the end and return its result string.

    Bug fixes: ``turn_switcher`` used to be initialised to the truthy
    string ``'O'`` when O was supposed to start, so X always moved
    first; it is now a real boolean.  The mutable default arguments
    (``Board()`` etc., built once at definition time and shared across
    calls) were replaced with per-call construction.
    """
    board = Board() if board is None else board
    player1 = User() if player1 is None else player1
    player2 = RandomAI() if player2 is None else player2
    turn_switcher: bool = starts_game == 'X'
    board.clear()
    while True:
        current_turn: str = 'X' if turn_switcher else 'O'
        print(board)
        winner = board.winner()
        if winner is not None:
            return winner
        cell = player1.move(board, current_turn=current_turn) if turn_switcher else \
            player2.move(board, current_turn=current_turn)
        board[cell] = current_turn
        turn_switcher = not turn_switcher
def main():
    """Read '<cmd> <player1> <player2>' commands and run games until 'exit'."""
    player_types: {str: Callable, } = {
        'user': User,
        'easy': RandomAI,
        'medium': MediumAI,
        'hard': HardAI
    }
    while True:
        tokens = input('Input command: ').lower().split()
        if 'exit' in tokens:
            exit('Bye!')
        try:
            first_player = player_types[tokens[1]]()
            second_player = player_types[tokens[2]]()
            print(game(player1=first_player, player2=second_player))
        except (KeyError, IndexError, ValueError):
            # Wrong difficulty name or too few arguments.
            print("Unknown option")
            continue
# Run the interactive command loop when executed as a script.
if __name__ == "__main__":
    main()
|
# --*-- coding : utf-8 --*--
# Project : python_lemon_作业
# Current file : lemon_190920_作业.py
# Author : 大壮
# Create time : 2019-09-20 22:14
# IDE : PyCharm
# TODO 成长很苦,进步很甜,加油!
import openpyxl
# 第一:excel类封装需要提供以下功能:
# 1、选择表单功能
# 2、读取一个单元格的数据功能
# 3、读取一行数据 功能
# 4、读取表单中所有数据功能
# 5、往单元格中写入数据功能
# 6、保存数据功能
class ExcelManual(object):
    """Thin wrapper around openpyxl for reading and writing .xlsx files.

    Cell coordinates are passed as a "row,column" string of 1-based
    indices, e.g. "1,2" for row 1 / column 2.
    """

    def __init__(self, file):
        # Path of the workbook to operate on.
        self.file = file

    @staticmethod
    def _parse_coordinate(coordinate):
        """Parse a "row,column" string into a (row, column) int pair.

        Bug fix: the original iterated the coordinate character by
        character and ``eval``-ed the first and last characters, which
        silently read/wrote the wrong cell for multi-digit rows or
        columns and ran ``eval`` on caller-supplied text.
        """
        row, column = coordinate.split(',')
        return int(row), int(column)

    def open_file(self):
        """Open the workbook and return it (a fresh object on every call)."""
        wb = openpyxl.load_workbook(self.file)
        return wb

    def get_sheet(self, indexes):
        """Select a worksheet by positional index."""
        sheet = self.open_file().worksheets[indexes]
        return sheet

    def get_sheet_two(self, indexes):
        """Select a worksheet by name."""
        sheet = self.open_file()[indexes]
        return sheet

    def one_cell(self, indexes, coordinate):
        """Read one cell's value.

        indexes: worksheet name.
        coordinate: "row,column" string.
        """
        row, column = self._parse_coordinate(coordinate)
        return self.get_sheet_two(indexes).cell(row, column).value

    def row_cell(self, indexes, coordinate):
        """Read one row of values.

        indexes: worksheet index.
        coordinate: 1-based row number.
        """
        return [cell.value for cell in self.get_sheet(indexes)[coordinate]]

    def all_data(self, indexes):
        """Read every cell value of the worksheet at ``indexes``.

        Returns a list of rows, each a list of cell values.
        """
        return [[cell.value for cell in row]
                for row in self.get_sheet(indexes).rows]

    def all_data_two(self, indexes, row, column):
        """Read all values starting at (row, column) up to the used range.

        indexes: worksheet index; row / column: 1-based start position.

        Fix: the original called ``self.get_sheet(indexes)`` for every
        single cell, reloading the whole workbook from disk per cell;
        the sheet is now opened once.
        """
        sheet = self.get_sheet(indexes)
        return [[sheet.cell(r, c).value
                 for c in range(column, sheet.max_column + 1)]
                for r in range(row, sheet.max_row + 1)]

    def write_file(self, indexes, coordinate, revised):
        """Write ``revised`` into one cell and save the workbook.

        indexes: worksheet index.
        coordinate: "row,column" string of the target cell.
        revised: the value to store; returned unchanged.
        """
        row, column = self._parse_coordinate(coordinate)
        wb = openpyxl.load_workbook(self.file)  # file path and name
        sheet = wb.worksheets[indexes]
        sheet.cell(row, column).value = revised
        wb.save(self.file)
        return revised

    def save_file(self):
        """Re-save and close the workbook.

        NOTE(review): this loads a fresh workbook and saves it
        unchanged, so it cannot persist edits made through other
        workbook objects -- kept as-is for interface compatibility.
        """
        self.open_file().save(self.file)
        self.open_file().close()
# Demo / manual test of ExcelManual against a local workbook.
xlsx = ExcelManual(r'D:\data.xlsx')
print('根据索引选择表单', xlsx.get_sheet(2))  # select a worksheet by index
# print(xlsx.get_sheet_two('Sheet3'))  # get a worksheet by name
# Read one cell: first arg = sheet name, second arg = "row,column" coordinate.
print('读取一个单元格的数据功能', xlsx.one_cell('Sheet3', '1,2'))
# Read one row: first arg = sheet index, second arg = row number.
print('读取一行数据 功能', xlsx.row_cell(2, 1))
# Read all data from a worksheet; arg = sheet index.
print('表单中所有数据:方法一', xlsx.all_data(2))
# Read all data starting at a given position:
# first arg = sheet index, second = start row, third = start column.
print('表单中所有数据456:方法二', xlsx.all_data_two(indexes=0, row=2, column=2))
# Write a value into a cell: sheet index, "row,column" coordinate, value.
# print(f'保存数据为:', xlsx.write_file(1, '1,1', '修改的数据'))
# Save and close the file; make sure it is not open elsewhere or this fails.
# xlsx.save_file()
'''
# 第二:请设计测试数据,对封装的excel类功能进行测试。
# excel中的具体内容,由各位同学自由发挥
# 导入HTMLTestRunnerNew
from HTMLTestRunnerNew import HTMLTestRunner # 生成HTML文件调用模块
import os # 使用os模块获取路径
import unittest # python自带单元测试模块
# 初始化 loader 测试加载器(TestLoader)
loader = unittest.TestLoader()
start_dir = os.path.dirname(os.path.abspath(__file__))
suite = loader.discover(start_dir) # 找到文件路径运行
with open('测试报告_001.html', 'wb') as f:
""" stream 文件流,可以传文件 verbosity 详细程度
title=文件标题 description=文件的注释 tester=测试人员 """
runner = HTMLTestRunner(f,
verbosity=2,
title='我是文件标题',
tester='我是测试人员',
description='我是文件注释')
# 运行
runner.run(suite)
'''
|
from datetime import timedelta
from unittest.mock import Mock
import visiology_py.datacollection as dc
from visiology_py.decorators import cached, retried, decorate_api
def exp(x: int) -> float:
    """Exponential backoff timeout: 0.1 * 2**x seconds."""
    timeout = 0.1 * (2 ** x)
    return float(timeout)
# Wrap ApiV2 with retry behaviour: at most one retry, with the timeout
# between attempts computed by ``exp``.  NOTE(review): presumably
# decorate_api patches the class's API methods in place -- confirm in
# visiology_py.decorators.
decorate_api(
    dc.ApiV2,
    retried(max_tries=1, timeout_function=exp),
)
def test_cached_caches_calls() -> None:
    """A repeated identical call within the TTL must not hit the wrapped function."""
    wrapped = Mock()
    wrapped.__name__ = "get_dimension_attributes"
    memoized = cached(time_to_live=timedelta(seconds=1))(wrapped)
    memoized()
    memoized()
    wrapped.assert_called_once()
def test_cached_do_not_mix_calls() -> None:
    """Calls with different arguments must be cached as separate entries."""
    wrapped = Mock()
    wrapped.__name__ = "get_dimension_elements"
    memoized = cached(time_to_live=timedelta(seconds=1))(wrapped)
    memoized("self", 1)
    memoized("self", 2)
    assert wrapped.call_count == 2
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.