index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
24,900 | 568d7d6507d43efd6f067b40310837a068ea4b4a | class UnionFind():
    def __init__(self, n):
        # Parent table for 1-based vertices (index 0 unused);
        # every node starts as its own root.
        self.par = [i for i in range(n+1)]
        # Upper bound on tree height, used by union-by-rank.
        self.rank = [0] * (n+1)
def find(self, x):
if self.par[x] == x:
return x
else:
self.par[x] = self.find(self.par[x])
return self.par[x]
def unite(self, x, y):
x = self.find(x)
y = self.find(y)
if self.rank[x] < self.rank[y]:
self.par[x] = y
else:
self.par[y] = x
if self.rank[x] == self.rank[y]:
self.rank[x] += 1
def same(self, x, y):
return self.find(x) == self.find(y)
# Read N cities and M roads, union the endpoints of each road, then print
# the minimum number of extra roads needed to connect everything:
# (number of connected components) - 1.
N, M = map(int, input().split())
city = UnionFind(N)
for _ in range(M):
    A, B = map(int, input().split())
    city.unite(A, B)
roots = {city.find(i) for i in range(1, N + 1)}
print(len(roots) - 1)
|
24,901 | edf17e047974204b77beca0fcd2a22367db2f466 | from distutils.core import setup, Extension
try:
import py2exe
except:
pass
import sys
import glob
import os
import shutil
data = [
'*.txt',
'data/*',
]
for d in glob.glob('data/*'):
if '_wav' not in d:
data.append('%s/*'%d)
print data
src = [
'*.py',
'*.c',
'*.h',
'*.i',
]
cmd = sys.argv[1]
if cmd in ('sdist'):
f = open("MANIFEST.in","w")
for l in data: f.write("include "+l+"\n")
for l in src: f.write("include "+l+"\n")
f.close()
if cmd in ('sdist','build'):
setup(
name='wiljafjord',
version='1.0',
description='Colonel Wiljafjord and the Tarbukas Tyranny',
author='Phil Hassey',
author_email='philhassey@gmail.com',
url='http://www.imitationpickles.org/',
)
if cmd in ('py2exe',):
setup(
options={'py2exe':{
'dist_dir':'dist',
'dll_excludes':['_dotblas.pyd','_numpy.pyd']
}},
windows=[{
'script':'main.py',
#'icon_resources':[(1,'icon.ico')],
}],
)
if cmd in ('build',):
for fname in glob.glob("build/lib*/*.so"):
shutil.copy(fname,os.path.basename(fname))
for fname in glob.glob("build/lib*/*.pyd"):
shutil.copy(fname,os.path.basename(fname))
if cmd in ('py2exe',):
for gname in data:
for fname in glob.glob(gname):
dname = os.path.join('dist',os.path.dirname(fname))
try:
os.mkdir(dname)
except:
'mkdir failed: '+dname
if not os.path.isdir(fname):
shutil.copy(fname,dname)
|
24,902 | 4913b37cb67c5331b047eaa645f34750c6d8fcdd | import unittest
from cards import card_manager
class TestCards(unittest.TestCase):
    '''We are building a card deck and we are supposed to find all 52 cards.
    '''

    def test_tabinet_card_deck(self):
        # Build a fresh deck and aggregate its per-card attributes.
        cm = card_manager.CardManager()
        cm.generate_deck()
        counter = 0
        points = 0
        values = 0
        alternantValues = 0
        for item in cm.main_deck:
            counter += 1
            values += item.value
            points += item.points
            alternantValues += item.alternantValue
        # A full deck has 52 cards; the attribute sums below are the fixed
        # totals expected for the Tabinet deck as generated by CardManager.
        self.assertEqual(len(cm.main_deck), 52)
        self.assertEqual(values, 376)
        self.assertEqual(alternantValues, 44)
        self.assertEqual(points, 21)
if __name__ == '__main__':
unittest.main() |
24,903 | b60291f5093f413df43da2091f57708cc63ffcd4 | temp = int(input("Qual a temperatura em graus C "))
# Celsius -> Fahrenheit: F = C * 9/5 + 32 (true division keeps fractions).
conversao = (temp * 9/5) + 32
print("{}°C são {}°F".format(temp, conversao))
24,904 | 759cdb87c6a601deed4e4ec9085e30199949ee2b | #!/usr/bin/python
def partition(inlist, low, high):
    """Move inlist[low] (the pivot) to its sorted position within
    inlist[low..high], in place, and return that position.

    After the call, values <= pivot sit to its left and values >= pivot
    to its right.
    """
    pivot = inlist[low]
    while low < high:
        # Scan from the right for an element that belongs on the left side.
        while low < high and inlist[high] >= pivot:
            high -= 1
        inlist[low] = inlist[high]
        # Scan from the left for an element that belongs on the right side.
        while low < high and inlist[low] <= pivot:
            low += 1
        inlist[high] = inlist[low]
    # low == high: drop the pivot into the remaining hole.
    inlist[low] = pivot
    return low
def quick_sort(inlist, low, high):
    """Recursively quicksort inlist[low..high] in place."""
    if low < high:
        idx = partition(inlist, low, high)
        # The pivot at idx is in its final position: exclude it from both
        # recursive calls. (The original recursed on (low, idx), repeatedly
        # re-partitioning ranges that still contained the placed pivot —
        # correct, but redundant work. Also removed the dead `idx = 0`.)
        quick_sort(inlist, low, idx - 1)
        quick_sort(inlist, idx + 1, high)
def Q_sort(inlist):
    """Quicksort `inlist` in place and return it for convenience."""
    quick_sort(inlist, 0, len(inlist) - 1)
    return inlist
if __name__ == "__main__":
t = [ 2, 3, 1, 2, 2, 4, 5, 3, 8 , 7]
print Q_sort(t)
|
24,905 | 959f7d6400e79ff8cefffd8395e752365a0725a5 | def merge_sort(a):
l = len(a)
if l > 1:
mid = l // 2
L = a[:mid]
R = a[mid:]
merge_sort(L)
merge_sort(R)
i = j = k = 0
while i < len(L) and j < len(R):
if L[i] <= R[j]:
a[k] = L[i]
i += 1
else:
a[k] = R[j]
j += 1
k += 1
while i < len(L):
a[k] = L[i]
i += 1
k += 1
while j < len(R):
a[k] = R[j]
j += 1
k += 1
# Demo: sort a sample list in place and show the result.
a = [5,4,6,2,1]
merge_sort(a)
print(a)
|
24,906 | 416b1064f31a862bb819b1893c7ed37db0e7178f | import os
import bz2
import glob
import shutil
import subprocess
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("-p","--prefix",help="prefix", required=True)
args = parser.parse_args()
def _comp_sub(filename):
    # Compress one file in place with gzip level 6, blocking until the
    # subprocess finishes. (Python 2: `print` statement.)
    print 'Compressing {0}'.format(filename)
    job_args = "gzip -6 {0}".format(filename)
    job = subprocess.Popen(job_args.split())
    job.wait()
# Every directory whose name starts with the given prefix is a job directory.
jobdirs = filter(os.path.isdir, glob.glob("{0}*".format(args.prefix)))
scratch = os.getcwd()
for job in jobdirs:
    print job
    os.chdir(job)
    #traj_files = glob.glob("*.traj.xyz")
    traj_files = glob.glob("crack_traj.xyz")
    print traj_files
    # Compress each trajectory file found inside the job directory.
    for traj in traj_files:
        _comp_sub(traj)
    # Return to the starting directory before visiting the next job.
    os.chdir(scratch)
|
24,907 | 0e6d14fd9aeda28602eda7366366a954ffcf6943 | from datetime import datetime
import json
import os
import sys
import time
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait
_SESSION_FILE = '.euler-session.json'
class BadSessionException(Exception):
pass
def load_session():
    """Read and deserialize the cached session dict from disk."""
    with open(_SESSION_FILE, 'rt') as session_file:
        return json.load(session_file)
def save_session(session):
    """Serialize the session dict and persist it to disk."""
    with open(_SESSION_FILE, 'wt') as session_file:
        json.dump(session, session_file)
def drive_login(password=None):
    """Interactively log in to Project Euler in a visible Chrome window.

    Prompts for the password when not given. Returns a session dict holding
    the password and the browser cookies, plus an 'expiry' timestamp when a
    cookie carries one.
    """
    if password is None:
        password = input('euler password > ')
    browser = webdriver.Chrome()
    browser.get('https://projecteuler.net/sign_in')
    # NOTE(review): find_element_by_* was removed in Selenium 4 — this code
    # assumes Selenium 3.x.
    browser.find_element_by_id('username').send_keys('indraastra')
    browser.find_element_by_id('password').send_keys(password)
    browser.find_element_by_id('remember_me').click()
    # Focus the captcha field so the user can solve it by hand.
    browser.find_element_by_id('captcha').click()
    # Wait until login has completed.
    print('Awaiting login...')
    wait = WebDriverWait(browser, 10)
    wait.until(EC.url_changes(browser.current_url))
    print('Login completed!')
    session = {
        'password': password,
        'cookies': browser.get_cookies()
    }
    # Record a cookie expiry so the session can be refreshed proactively
    # (the last cookie with an 'expiry' field wins).
    for cookie in browser.get_cookies():
        if 'expiry' in cookie:
            session['expiry'] = cookie['expiry']
            #session['cookies'] = [cookie]
    browser.quit()
    return session
def restore_session():
    """Load the cached session, driving a fresh login when it is missing
    or expired. Returns the (possibly refreshed) session dict."""
    if not os.path.exists(_SESSION_FILE):
        print(f'{_SESSION_FILE} not found; login required!')
        session = drive_login()
        save_session(session)
    session = load_session()
    # A missing 'expiry' defaults to the epoch, which always counts as expired.
    expiry = datetime.fromtimestamp(session.get('expiry', 0))
    # NOTE(review): `not expiry` is always False (datetime objects are
    # truthy); only the comparison below actually triggers a re-login.
    if not expiry or datetime.now() >= expiry:
        print(f'Session expired at {expiry}; login required!')
        session = drive_login(session['password'])
        save_session(session)
    print('Session loaded!')
    return session
def drive_submit(session, problem, solution):
    """Submit `solution` for `problem` via headless Chrome, authenticated
    with the saved session cookies, and print the response text."""
    chrome_options = Options()
    chrome_options.add_argument('--headless')
    browser = webdriver.Chrome(options=chrome_options)
    # Cookies can only be installed after first loading the target domain.
    browser.get(f'https://projecteuler.net/')
    browser.delete_all_cookies()
    for cookie in session['cookies']:
        browser.add_cookie(cookie)
    browser.get(f'https://projecteuler.net/problem={problem}')
    browser.find_element_by_id('guess').send_keys(solution)
    browser.find_element_by_xpath('//input[@type="submit"]').click()
    # Wait until submit has completed.
    print('Awaiting submission...')
    wait = WebDriverWait(browser, 30)
    wait.until(EC.text_to_be_present_in_element(
        (By.ID, 'content'), 'answer'))
    print('>>>', browser.find_element_by_id('content').text)
    browser.quit()
if __name__ == '__main__':
    # CLI entry point: submit.py <problem> <solution>
    if len(sys.argv) != 3:
        print('Usage: submit.py <problem> <solution>')
        sys.exit(1)
    session = restore_session()
    drive_submit(session, sys.argv[1], sys.argv[2])
|
24,908 | 8781ed4748b942a1a34bad0467d98eddd9caae3f | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
__author__ = 'Usuário'
import gensim.matutils
import numpy as np
from Parser import *
from Filter import *
from Cleaner import *
from Learners import *
from stopwords import *
import timeit
import sklearn
import sklearn.feature_extraction.text as txtTools #.TfidfTransformer
import os.path
import nltk
class SklearnTfIdf:
    """Wraps sklearn's TfidfTransformer around a gensim corpus.

    The gensim corpus is converted to a scipy CSC matrix with shape
    (terms, docs); sklearn expects (docs, terms), hence the transposes.
    """
    def __init__(self, corpus, idf=True, only_presence=False, norm=None):
        self.data_sparse = gensim.matutils.corpus2csc(corpus)
        self.tf_idf_transformer = txtTools.TfidfTransformer(use_idf=idf, norm=norm)
        self.data = self.tf_idf_transformer.fit_transform(self.data_sparse.T)
        # self.data = self.tf_idf_transformer.transform(self.data_sparce.T)
        self.only_presence = only_presence
        if self.only_presence:
            # Collapse tf-idf weights to 0/1 presence indicators, in place.
            sklearn.preprocessing.binarize(self.data, copy=False)
            # sklearn.preprocessing.Normalizer(copy=False).transform(self.data)

    def get_transformer(self):
        # Fitted TfidfTransformer, reusable on held-out corpora.
        return self.tf_idf_transformer

    def get_features(self):
        # Transformed (docs, terms) feature matrix.
        return self.data

    def get_corpus_sparce_rep(self):
        # Raw (terms, docs) CSC matrix before tf-idf weighting.
        return self.data_sparse

    def apply_transform(self, corpus):
        """Apply the fitted transformer to a new corpus, padding its
        vocabulary to the training dimensionality."""
        data = gensim.matutils.corpus2csc(corpus, num_terms=self.data_sparse.shape[0])
        feat = self.tf_idf_transformer.transform(data.T)
        if self.only_presence:
            sklearn.preprocessing.binarize(feat, copy=False)
            # sklearn.preprocessing.Normalizer(copy=False).transform(feat)
        return feat
#####################################
def main():
    """End-to-end experiment driver (Python 2): parse a labeled corpus,
    build tf-idf features, then either grid-search preprocessing/learning
    parameters (test_param=True) or train one model and write predictions
    for the test corpus."""
    # --- configuration flags ---
    test_param = False      # grid-search mode vs single train/predict run
    president = True        # presidential corpus vs movie-review corpus
    read_backup = False     # reuse a pickled SVM dump if present
    no_below = 2            # vocabulary pruning thresholds for the cleaner
    no_above = 10 ** -1
    c = 0.001               # regularization strength / Naive Bayes alpha
    # stemmer = nltk.stem.snowball.FrenchStemmer()
    stemmer = None
    print "Parsing text data..."
    if president:
        FirstParser = ParserCM(r"C:\Users\Usuario\Desktop\ENSTA\M2 UPMC\Cours\FDMS\corpus.tache1.learn-stem.utf8")
        cvs_log = "..\\..\\var_param_CM.csv"
    else:
        FirstParser = ParserCM(r"C:\Users\Usuario\Desktop\ENSTA\M2 UPMC\Cours\FDMS\corpus.movies.learn.utf8")
        cvs_log = "..\\..\\var_param_movies.csv"
    text_data, Y = FirstParser.get_data()
    print "Done parsing\n"
    if test_param:
        # Grid search over normalization / vocabulary pruning / C, logging
        # 5-fold cross-validation accuracy to a CSV; Ctrl-C stops it early.
        count = 0
        with open(cvs_log, "w") as out:
            try:
                for normalization in [None, 'l1', 'l2']:
                    for no_below in 10 ** np.arange(1, 4, 1, dtype=np.float64):
                        for no_above in 10 ** np.arange(-3, 0, 1, dtype=np.float64): #np.arange(0.4, 1.1, 0.1):
                            print "Cleaning parsed text..."
                            if president:
                                Cleaner = GuigueCleaner(text_data, no_below=no_below, no_above=no_above, stoplist=FRStopWords().get_toplist())
                            else:
                                Cleaner = GuigueCleaner(text_data, no_below=no_below, no_above=no_above, stoplist=ENStopWords().get_toplist())
                            cbow_corpus = Cleaner.get_corpus()
                            print "Done cleaning\n"
                            print "Getting features..."
                            tf_idfer = SklearnTfIdf(cbow_corpus, idf=False, only_presence=False, norm=normalization)
                            X = tf_idfer.get_features()
                            print no_below, no_above, X.shape[-1]
                            print "Got\n"
                            print "Learning SVM model..."
                            for kernel in [None]:
                                for c in 10 ** np.arange(-4, 1, dtype=np.float64):
                                    t0 = timeit.default_timer()
                                    # svm = learn_by_svm(X, Y, C=c, kernel=kernel, fit=False, path="..\\..\\svm_dump{}.pkl".format(count))
                                    svm = learn_by_MNbayes(X, Y, alpha=c, fit=False)
                                    scores = svm.cross_val(X, Y, 5, cpus=6)
                                    t1 = timeit.default_timer()
                                    print no_below, no_above, kernel,normalization, c, "Accuracy: %0.2f (+/- %0.2f)" % (scores.mean(), scores.std() * 2), (t1-t0)/60
                                    out.write("{},{},{},{},{},{}\n".format(no_below, no_above,normalization, kernel, c, scores.mean()))
                                    count += 1
            except KeyboardInterrupt:
                pass
        print "Done\n"
        exit(0)
    else:
        print "Cleaning parsed text..."
        if president:
            Cleaner = GuigueCleaner(text_data, no_below=no_below, no_above=no_above, stoplist=FRStopWords().get_toplist(), stemmer=stemmer)
        else:
            Cleaner = GuigueCleaner(text_data, no_below=no_below, no_above=no_above, stoplist=ENStopWords().get_toplist())
        cbow_corpus = Cleaner.get_corpus()
        print "Done cleaning\n"
        print "Getting features..."
        tf_idfer = SklearnTfIdf(cbow_corpus, idf=True, only_presence=False, norm='l2')
        X = tf_idfer.get_features()
        print X.shape
        print "Got\n"
        kernel = "linear"
        print "Learning SVM model..."
        print datetime.datetime.now().time()
        t0 = timeit.default_timer()
        if os.path.isfile(r"..\..\svm_dump.pkl") and read_backup:
            # Reload a previously trained model from disk.
            svm = learn_by_svm()
        else:
            # svm = learn_by_MNbayes(X, Y, alpha=c)
            # svm = learn_by_svm(X, Y, C=c, kernel=kernel)
            svm = learn_by_svc(X, Y, C=c)
            # svm = learn_by_perceptron(X, Y)
        t1 = timeit.default_timer()
        print "Learning took: {} min".format((t1-t0)/60)
        print "Done\n"
        print "Parsing text data..."
        if president:
            TestParser = ParserCMTest(r"C:\Users\Usuario\Desktop\ENSTA\M2 UPMC\Cours\FDMS\corpus.tache1.test-stem.utf8")
            out_file = r"C:\Users\Usuario\Desktop\ENSTA\M2 UPMC\Cours\FDMS\pred_CM"
        else:
            TestParser = ParserCMTest(r"C:\Users\Usuario\Desktop\ENSTA\M2 UPMC\Cours\FDMS\corpus.movies.test.utf8")
            out_file = r"C:\Users\Usuario\Desktop\ENSTA\M2 UPMC\Cours\FDMS\pred_movies"
        test_text = TestParser.get_data()
        print "Cleaning and getting features"
        # Reuse the fitted cleaner + tf-idf transformer on the test corpus.
        test_features = tf_idfer.apply_transform(Cleaner.line_list_to_bow(Cleaner.lines_to_words(test_text)))
        print test_features.shape
        # test_features = scaler.transform(test_features)
        print "Prediciton"
        Y_pred = svm.predict(test_features)
        print "Saving outputs"
        # Non-positive predictions map to label 'M', positive to 'C'.
        with open(out_file, "w") as out:
            for pred in Y_pred:
                if pred <= 0:
                    out.write("M\n")
                else:
                    out.write("C\n")
        if president:
            # Post-process the prediction sequence with MedianFilter
            # (presumably smoothing — confirm against its implementation).
            mf = MedianFilter(vec=Y_pred)
            mf.export_data(out_file+"_mf")
        print "DONE"
        # Audible notification that the run finished (Windows only).
        import winsound
        Freq = 2500 # Set Frequency To 2500 Hertz
        Dur = 1000 # Set Duration To 1000 ms == 1 second
        winsound.Beep(Freq,Dur)
if __name__ =='__main__':main() |
24,909 | aa101073b2779fe19489670713aeddc8638b6a3b | # coding=utf-8
import json
import atexit
from zookeeper.zk_client import ZkClient
from kazoo.protocol.states import KazooState
from kazoo.exceptions import NoNodeError, NodeExistsError
from server.instance_config_data import InstanceConfigData
import logging
logger = logging.getLogger(__name__)
class ZkPublisher(ZkClient):
    """Publishes a service instance to zookeeper as an ephemeral node and
    keeps it registered across connection losses.

    The node lives at <zk_path>/<service_key>/<instance_path> and holds the
    JSON-serialized InstanceConfigData.
    """

    def __init__(self, service_key, instance_path):
        super(ZkPublisher, self).__init__()
        self._service_key = service_key
        self._instance_path = instance_path
        self._service_path = '%s/%s' % (self._zk_path, self._service_key)
        self._node_path = '%s/%s' % (self._service_path, self._instance_path)
        self._instance_config_data = None
        # BUG FIX: _state_listener read this flag before any LOST/SUSPENDED
        # event had set it, raising AttributeError on the first CONNECTED
        # notification; initialize it explicitly.
        self._connection_lost = False

    def register(self, instance_config_data):
        """Create the ephemeral node for this instance and start watching
        connection state; logs (instead of raising) if the node exists."""
        assert isinstance(instance_config_data, InstanceConfigData)
        self._instance_config_data = instance_config_data
        self._client.add_listener(self._state_listener)
        try:
            self._ensure_path()
            self._client.create(self._node_path, self._serialize_instance_config_data(), ephemeral=True)
        except NodeExistsError:
            logger.error('Zookeeper node exists, path: %s', self._node_path)
        # Drop the ephemeral node cleanly on interpreter exit.
        atexit.register(self.stop)

    def modify(self, instance_config_data):
        """Overwrite the node's payload with fresh config data; logs
        (instead of raising) if the node is gone."""
        assert isinstance(instance_config_data, InstanceConfigData)
        self._instance_config_data = instance_config_data
        try:
            self._ensure_path()
            self._client.set(self._node_path, self._serialize_instance_config_data())
        except NoNodeError:
            logger.error('Zookeeper node does not exists, path: %s', self._node_path)

    def _state_listener(self, state):
        """Track connection loss and re-register once reconnected."""
        if state in [KazooState.LOST, KazooState.SUSPENDED]:
            self._connection_lost = True
            logger.info('(Zk-publisher)Zookeeper connection lost, current state: %s', state)
        elif state == KazooState.CONNECTED and self._connection_lost:
            # Reconnected: re-register on the kazoo handler thread, since
            # state listeners must not block.
            self._client.handler.spawn(self.register, self._instance_config_data)
            self._connection_lost = False
            logger.info('(Zk-publisher)Zookeeper reconnection, current state: %s', state)

    def _ensure_path(self):
        # Create the (persistent) parent path if it is missing.
        self._client.ensure_path(self._service_path)

    def _serialize_instance_config_data(self):
        # Guard against register/modify never having stored valid data.
        if not isinstance(self._instance_config_data, InstanceConfigData):
            raise Exception('Zookeeper register invalid service config data')
        return json.dumps(self._instance_config_data.to_dict())
|
24,910 | df748bbe52c0a4d9d0771b191f54de3135cc3682 | from envoy import run as envoy_run
from accio.scm import SCMBase
class Git(SCMBase):
    """Git backend for SCMBase, implemented by shelling out via envoy."""

    def _get_last_pushed_commit(self):
        """Walk history from HEAD and return the newest commit reachable
        from any remote branch, or None if nothing has been pushed."""
        history = envoy_run('git rev-list HEAD')
        for sha in history.std_out.split('\n'):
            containing = envoy_run('git branch -r --contains %s' % sha)
            if containing.std_out != "":
                return sha
        return None

    def _get_file_content(self, file_path, commit):
        """Return the file's content as stored at the given commit."""
        result = envoy_run('git show %s:"%s"' % (commit, file_path.replace('\\', '/')))
        return result.std_out

    def _get_name(self):
        """Configured git user.name."""
        return envoy_run('git config user.name').std_out.strip()

    def _get_email(self):
        """Configured git user.email."""
        return envoy_run('git config user.email').std_out.strip()

    def _get_branch(self):
        """Name of the currently checked-out branch."""
        return envoy_run('git rev-parse --abbrev-ref HEAD').std_out.strip()
|
24,911 | b7b3d8f64f0ca805185614b7cf61acc5ee610d4a | '''
URL for challenge: https://www.codewars.com/kata/the-hashtag-generator/train/python
The marketing team is spending way too much time typing in hashtags.
Let's help them with our own Hashtag Generator!
Here's the deal:
It must start with a hashtag (#).
All words must have their first letter capitalized.
If the final result is longer than 140 chars it must return false.
If the input or the result is an empty string it must return false.
Examples
" Hello there thanks for trying my Kata" => "#HelloThereThanksForTryingMyKata"
" Hello World " => "#HelloWorld"
"" => false
'''
def generate_hashtag(s):
    """Build a CamelCase hashtag from `s`.

    Returns False when the input contains no words, or when the finished
    hashtag (including the leading '#') exceeds 140 characters, per the
    kata rules.
    """
    words = s.split()
    # BUG FIX: the original only length-checked the raw input, so a
    # whitespace-only string produced '#' and a 140-character word produced
    # a 141-character hashtag; validate the words and the final result.
    if not words:
        return False
    result = '#' + ''.join(word.title() for word in words)
    if len(result) > 140:
        return False
    return result
print(generate_hashtag(' code wars is nice')) |
24,912 | f62ac27f3c7c06eb6c5c1a64fbaea95e9b78a930 |
from quantum_routines import (generate_empty_initial_state,
generate_mixing_Ham, generate_Ham_from_graph)
from qutip import sesolve, sigmaz, sigmap, qeye, tensor, Options
import settings
import numpy as np
import math
import numba
from tqdm.auto import tqdm, trange
from operator import itemgetter
settings.init()
def generate_signal_fourier(G, rot_init=settings.rot_init,
                            N_sample=1000, hamiltonian='xy',
                            tf=100*math.pi):
    """
    Function to return the Fourier transform of the average number of
    excitation signal.
    Arguments:
    ---------
    - G: networkx.Graph, graph to analyze
    - rot_init: float, initial rotation
    - N_sample: int, number of timesteps to compute the evolution
    - hamiltonian: str 'xy' or 'ising', type of hamiltonian to simulate
    - tf: float, total time of evolution
    Returns:
    --------
    - signal_fft: numpy.ndarray, shape (N_sample,) values of the fourier
      spectrum
    - freq_normalized: numpy.ndarray, shape (N_sample,) values of the
      frequencies
    """
    assert hamiltonian in ['ising', 'xy']
    N_nodes = G.number_of_nodes()
    H_evol = generate_Ham_from_graph(G, type_h=hamiltonian)
    # Prepare the initial state: evolve the empty state under the mixing
    # Hamiltonian for half the requested initial rotation angle.
    rotation_angle_single_exc = rot_init/2.
    tlist = np.linspace(0, rotation_angle_single_exc, 200)
    psi_0 = generate_empty_initial_state(N_nodes)
    H_m = generate_mixing_Ham(N_nodes)
    result = sesolve(H_m, psi_0, tlist)
    final_state = result.states[-1]
    # Single-qubit sigma_z / sigma_+ operators lifted onto each site of the
    # N-qubit register.
    sz = sigmaz()
    si = qeye(2)
    sp = sigmap()
    sz_list = []
    sp_list = []
    for j in range(N_nodes):
        op_list = [si for _ in range(N_nodes)]
        op_list[j] = sz
        sz_list.append(tensor(op_list))
        op_list[j] = sp
        sp_list.append(tensor(op_list))
    tlist = np.linspace(0, tf, N_sample)
    # Observable combining sum(sigma_+) and sum(sigma_z) with weights set by
    # the initial rotation angle.
    observable = (-2*math.sin(2*rotation_angle_single_exc)
                  * sum(spj for spj in sp_list)
                  + math.cos(2*rotation_angle_single_exc)
                  * sum(szj for szj in sz_list))
    opts = Options()
    opts.store_states = True
    result = sesolve(H_evol, final_state, tlist,
                     e_ops=[observable], options=opts)
    full_signal = result.expect
    signal = full_signal[0].real
    # FFT of the real-valued time signal; frequencies rescaled to units of
    # pi/tf.
    signal_fft = np.fft.fft(signal)
    freq = np.fft.fftfreq(signal.shape[-1])
    freq_normalized = np.abs(freq * N_sample * 2) / (tf / np.pi)
    return signal_fft, freq_normalized
@numba.njit
def entropy(p):
    """Shannon entropy of the discrete distribution `p`.

    `p` must be a 1-D array of non-negative floats summing to 1; a small
    epsilon keeps the logarithm finite at zero-probability entries.
    """
    assert (p >= 0).all()
    assert abs(np.sum(p) - 1) < 1e-6
    eps = 1e-12
    return -np.sum(p * np.log(p + eps))
@numba.njit
def jensen_shannon(hist1, hist2):
    '''
    Returns the Jensen Shannon divergence between two probability
    distributions represented as histograms.
    Arguments:
    ---------
    - hist1: tuple of numpy.ndarray (density, bins),
             len(bins) = len(density) + 1.
             The integral of the density wrt bins sums to 1.
    - hist2: same format.
    Returns:
    --------
    - float, value of the Jensen Shannon divergence.
    '''
    # Refine both histograms onto the union of their bin edges so the two
    # densities can be compared interval by interval.
    bins = np.sort(np.unique(np.array(list(hist1[1]) + list(hist2[1]))))
    masses1 = []
    masses2 = []
    for i, b in enumerate(bins[1::]):
        # Mass of hist1 inside (bins[i], b]: zero outside hist1's support,
        # otherwise the interval width times the density of the covering bin.
        if b <= hist1[1][0]:
            masses1.append(0.)
        elif b > hist1[1][-1]:
            masses1.append(0.)
        else:
            j = 0
            while b > hist1[1][j]:
                j += 1
            masses1.append((b-bins[i]) * hist1[0][j-1])
        # Same computation for hist2.
        if b <= hist2[1][0]:
            masses2.append(0.)
        elif b > hist2[1][-1]:
            masses2.append(0.)
        else:
            j = 0
            while b > hist2[1][j]:
                j += 1
            masses2.append((b-bins[i]) * hist2[0][j-1])
    masses1 = np.array(masses1)
    masses2 = np.array(masses2)
    # JS(P, Q) = H((P+Q)/2) - (H(P) + H(Q))/2
    masses12 = (masses1+masses2)/2
    return entropy(masses12) - (entropy(masses1) + entropy(masses2))/2
# @ray.remote
def return_fourier_from_dataset(graph_list, rot_init=settings.rot_init):
    """
    Returns the fourier transform of evolution for a list of graphs for
    the hamiltonians ising and xy.
    Arguments:
    ---------
    - graph_list: list or numpy.ndarray of networkx.Graph objects
    Returns:
    --------
    - fs_xy: numpy.ndarray of shape (2, len(graph_list), 1000)
             [0,i]: Fourier signal of graph i at 1000 points for
             hamiltonian XY
             [1,i]: frequencies associated to graph i at 1000 points
             for hamiltonian XY
    - fs_is: same for the Ising hamiltonian
    """
    fs_xy = np.zeros((2, len(graph_list), 1000))
    fs_is = np.zeros((2, len(graph_list), 1000))
    for i, graph in enumerate(graph_list):
        # Row 0 stores the Fourier signal, row 1 the matching frequencies
        # (see generate_signal_fourier). Note np.zeros is real-valued, so
        # the complex FFT output is stored by its real part here.
        fs_xy[0][i], fs_xy[1][i] = generate_signal_fourier(graph,
                                                           rot_init=rot_init,
                                                           N_sample=1000,
                                                           hamiltonian='xy')
        fs_is[0][i], fs_is[1][i] = generate_signal_fourier(graph,
                                                           rot_init=rot_init,
                                                           N_sample=1000,
                                                           hamiltonian='ising')
    return fs_xy, fs_is
def return_evolution(G, times, pulses, evol='xy'):
    """
    Returns the final state after the following evolution:
    - start with empty state with as many qubits as vertices of G
    - uniform superposition of all states
    - alternating evolution of H_evol during times, and H_m during pulses
    Arguments:
    ---------
    - G: graph networkx.Graph object
    - times: list of times to evolve following H_evol, list or np.ndarray
    - pulses: list of times to evolve following H_m, list or np.ndarray
              same length as times
    - evol: type of evolution for H_evol 'ising' or 'xy'
    Returns:
    --------
    - state: qutip.Qobj final state of evolution
    """
    assert evol in ['xy', 'ising']
    assert len(times) == len(pulses)
    N_nodes = G.number_of_nodes()
    H_evol = generate_Ham_from_graph(G, type_h=evol)
    H_m = generate_mixing_Ham(N_nodes)
    state = generate_empty_initial_state(N_nodes)
    opts = Options()
    opts.store_states = True
    # Initial pi/4 mixing pulse prepares the uniform superposition.
    result = sesolve(H_m, state, [0, np.pi/4], options=opts)
    state = result.states[-1]
    # Alternate problem-Hamiltonian evolution and mixing pulses; zero
    # durations are skipped entirely.
    for i, theta in enumerate(pulses):
        if np.abs(times[i]) > 0:
            if evol == 'xy':
                result = sesolve(H_evol, state, [0, times[i]], options=opts)
                state = result.states[-1]
            else:
                # Ising branch: apply the exact matrix-exponential
                # propagator instead of integrating the ODE.
                hexp = (- times[i] * 1j * H_evol).expm()
                state = hexp * state
        if np.abs(theta) > 0:
            result = sesolve(H_m, state, [0, theta], options=opts)
            state = result.states[-1]
    return state
def return_list_of_states(graphs_list,
                          times, pulses, evol='xy', verbose=0):
    """
    Run return_evolution on every graph and collect the final states.
    Arguments:
    ---------
    - graphs_list: iterator of networkx.Graph objects
    - times: list of times to evolve following H_evol, list or np.ndarray
    - pulses: list of times to evolve following H_m, list or np.ndarray
              same length as times
    - evol: type of evolution for H_evol 'ising' or 'xy'
    - verbose: int, any value > 0 enables the progress bar
    Returns:
    --------
    - list of qutip.Qobj final states of evolution, same length as
      graphs_list
    """
    return [return_evolution(G, times, pulses, evol)
            for G in tqdm(graphs_list, disable=verbose == 0)]
def return_energy_distribution(graphs_list, all_states, observable_func=None, return_energies=False, verbose=0):
    """
    Returns all the discrete probability distributions of a diagonal
    observable on a list of states each one associated with a graph. The
    observable can be different for each state. The distribution is taken
    over all possible values of all observables.
    Arguments:
    ---------
    - graphs_list: iterator of networkx.Graph objects
    - all_states: list of qutip.Qobj states associated with graphs_list
    - observable_func: function(networkx.Graph):
                       return qutip.Qobj diagonal observable
    - return_energies: boolean
    Returns:
    --------
    - all_e_masses: numpy.ndarray of shape (len(graphs_list), N_dim)
                    all discrete probability distributions
    - e_values_unique: numpy.ndarray of shape (N_dim, )
                       if return_energies, all energies
    """
    all_e_distrib = []
    all_e_values_unique = []
    for i, G in enumerate(tqdm(graphs_list, disable=verbose==0)):
        # Default observable: the graph's diagonal Ising-z Hamiltonian.
        if observable_func == None:
            observable = generate_Ham_from_graph(
                G, type_h='ising', type_ising='z'
            )
        else:
            observable = observable_func(G)
        # For a diagonal observable the diagonal entries are its eigenvalues
        # in the computational basis.
        e_values = observable.data.diagonal().real
        e_values_unique = np.unique(e_values)
        state = all_states[i]
        # Probability mass of each distinct eigenvalue under |amplitude|^2.
        e_distrib = np.zeros(len(e_values_unique))
        for j, v in enumerate(e_values_unique):
            e_distrib[j] = np.sum(
                (np.abs(state.data.toarray()) ** 2)[e_values == v]
            )
        all_e_distrib.append(e_distrib)
        all_e_values_unique.append(e_values_unique)
    # Re-index every distribution onto the union of all observed energies,
    # filling missing energies with zero mass.
    e_values_unique = np.unique(np.concatenate(all_e_values_unique, axis=0))
    all_e_masses = []
    for e_distrib, e_values in zip(all_e_distrib, all_e_values_unique):
        masses = np.zeros_like(e_values_unique)
        for d, e in zip(e_distrib, e_values):
            masses[e_values_unique == e] = d
        all_e_masses.append(masses)
    all_e_masses = np.array(all_e_masses)
    if return_energies:
        return all_e_masses, e_values_unique
    return all_e_masses
def extend_energies(target_energies, energies, masses):
    """
    Extends masses array with columns of zeros for missing energies.
    Arguments:
    ---------
    - target_energies: array-like of shape (N_dim, ), target energies
    - energies: array-like of shape (N_dim_init, ), energies of distributions
    - masses: numpy.ndarray of shape (N, N_dim_init), discrete probability
      distributions (one per row)
    Returns:
    --------
    - numpy.ndarray of shape (N, N_dim)
      all extended discrete probability distributions
    """
    # Map each energy to its first column index once, replacing the
    # original O(N_dim * N_dim_init) list.index scan per target energy.
    column_of = {}
    for idx, energy in enumerate(energies):
        column_of.setdefault(energy, idx)
    res = np.zeros((masses.shape[0], len(target_energies)))
    for i, energy in enumerate(target_energies):
        # Missing energies keep their freshly-zeroed column (the original
        # re-assigned zeros here, a no-op).
        if energy in column_of:
            res[:, i] = masses[:, column_of[energy]]
    return res
def merge_energies(e1, m1, e2, m2):
    """
    Align two sets of energy distributions onto the union of their
    energies, filling missing energies in each with zero-mass columns.
    N_dim is the size of the union of the two energy sets.
    Arguments:
    ---------
    - e1: numpy.ndarray of shape (N_dim1, ) energies of first distributions
    - m1: numpy.ndarray of shape (N1, N_dim1) first discrete probability distributions
    - e2: numpy.ndarray of shape (N_dim2, ) energies of second distributions
    - m2: numpy.ndarray of shape (N2, N_dim2) second discrete probability distributions
    Returns:
    --------
    - numpy.ndarray of shape (N1, N_dim): extended first distributions
    - numpy.ndarray of shape (N2, N_dim): extended second distributions
    """
    union = sorted(set(e1) | set(e2))
    return extend_energies(union, e1, m1), extend_energies(union, e2, m2)
def return_js_square_matrix(distributions, verbose=0):
    """
    Pairwise Jensen-Shannon divergence matrix of discrete distributions.
    Arguments:
    ---------
    - distributions: numpy.ndarray of shape (N_sample, N_dim), one
                     probability distribution per row, each summing to 1.
    Returns:
    --------
    - js_matrix: symmetric numpy.ndarray of shape (N_sample, N_sample)
    """
    n = len(distributions)
    js_matrix = np.zeros((n, n))
    # Only the lower triangle is computed; symmetry fills the rest.
    for i in range(n):
        for j in range(i + 1):
            masses1 = distributions[i]
            masses2 = distributions[j]
            div = entropy((masses1+masses2)/2) -\
                entropy(masses1)/2 - entropy(masses2)/2
            js_matrix[i, j] = div
            js_matrix[j, i] = div
    return js_matrix
def return_js_matrix(distributions1, distributions2, verbose=0):
    """
    Jensen-Shannon divergences between two families of discrete
    distributions.
    Arguments:
    ---------
    - distributions1: numpy.ndarray of shape (N_samples_1, N_dim),
                      one row-distribution per sample, rows summing to 1.
    - distributions2: numpy.ndarray of shape (N_samples_2, N_dim),
                      same format and matching N_dim.
    Returns:
    --------
    - js_matrix: numpy.ndarray of shape (N_samples_1, N_samples_2)
    """
    assert distributions1.shape[1] == distributions2.shape[1], \
        "Distributions must have matching dimensions. Consider using merge_energies"
    js_matrix = np.zeros((len(distributions1), len(distributions2)))
    for i in trange(len(distributions1), desc='dist1 loop', disable=verbose<=0):
        masses1 = distributions1[i]
        for j in trange(len(distributions2), desc='dist2 loop', disable=verbose<=1):
            masses2 = distributions2[j]
            js_matrix[i, j] = entropy((masses1+masses2)/2) -\
                entropy(masses1)/2 - entropy(masses2)/2
    return js_matrix
class Memoizer:
    """Caches observable(graph) results, keyed by a graph fingerprint, to
    avoid recomputation.

    Attributes:
    -----------
    - observable: function(networkx.Graph) ->
                  qtip.Qobj diagonal observable
    - get_key: function(networkx.Graph) ->
               hashable key identifying the graph
    """

    def __init__(self, observable, get_key=None):
        self.graphs = {}
        self.observable = observable
        if get_key is None:
            self.get_key = Memoizer.edges_key
        else:
            self.get_key = get_key

    @staticmethod
    def edges_unique_key(graph):
        """
        Key insensitive to how the graph's edges are returned (order of
        edges and order of nodes within each edge).
        Same result for [(a, b), (c, d)] and [(d, c), (a, b)].
        """
        normalized = [sorted(edge) for edge in graph.edges]
        normalized.sort(key=itemgetter(0, 1))
        return tuple(tuple(edge) for edge in normalized)

    @staticmethod
    def edges_key(graph):
        """Simple key based on the edges list as-is."""
        return tuple(graph.edges())

    def get_observable(self, graph):
        """
        Return observable(graph), computing it only the first time the
        graph's key is seen.
        Arguments:
        ---------
        - graph: networkx.Graph to get the observable for
        Returns:
        --------
        - qtip.Qobj, diagonal observable
        """
        key = self.get_key(graph)
        try:
            return self.graphs[key]
        except KeyError:
            value = self.observable(graph)
            self.graphs[key] = value
            return value
|
24,913 | e307e437e644df31ee394c5e2297a705a5def34d | import csv
from django.shortcuts import render
from .models import CsvList
from .forms import UploadForm
def upload(request, *args, **kwargs):
    """Render the CSV upload form; on a valid POST, save the uploaded file
    and import its rows into CsvList."""
    form = UploadForm(request.POST or None, request.FILES or None)
    if request.method == "POST":
        if form.is_valid():
            instance = form.save(commit=True)
            # NOTE(review): DictReader over instance.upload.file assumes the
            # storage yields text lines — confirm for binary-mode storages.
            reader = csv.DictReader(instance.upload.file)
            for row in reader:
                # get_or_create is keyed on every column, so re-uploading an
                # identical row does not create a duplicate.
                parsing, created = CsvList.objects.get_or_create(
                    id=row['id'],
                    first_name=row['first_name'],
                    last_name=row['last_name'],
                    email=row['email'],
                    domain=row['domain'],
                    url=row['url']
                )
    context = {
        "form": form,
    }
    return render(request, "django_parser/upload.html", context, )
def list(request):
    # NOTE(review): this view shadows the builtin `list`; renaming would
    # break URLconf references, so it is documented rather than changed.
    # Render all imported CSV rows ordered by id.
    queryset = CsvList.objects.order_by('id')
    context = {
        "queryset": queryset,
    }
    return render(request, "django_parser/list.html", context, )
|
24,914 | eb9a8a72f7e8da7bc7e2d703cc3959906eaf8a9d | from django.urls import re_path, include
from .views import RegistrationAPIView, LoginAPIView, IsTeacherAPIView, ContentAPIView
urlpatterns = [
re_path(r'^register/?$', RegistrationAPIView.as_view(), name='user_registration'),
re_path(r'^auth/?$', LoginAPIView.as_view(), name='user_login'),
re_path(r'^is_teacher/?$', IsTeacherAPIView.as_view(), name='is_teacher'),
re_path(r'^edit/content/?$', ContentAPIView.as_view(), name='content'),
] |
24,915 | 8fb4aeb74e344918410ea35fa777ce6103c0b260 | """
example, which shows the difference between the new approach and a fixed
efficiency
"""
from oemof.thermal.concentrating_solar_power import csp_precalc
import pandas as pd
import matplotlib.pyplot as plt
# precaluculation #
dataframe = pd.read_csv('CSP_data/data_Muscat_22_8.csv', sep=';')
dataframe['Datum'] = pd.to_datetime(dataframe['Datum'])
df_temp_amb_series = pd.read_csv('CSP_data/data_Muscat_22_8_midday.csv',
sep=';')
temp_amb_series = pd.read_csv('CSP_data/temp_ambience.csv', sep=';')['t_amb']
# parameters for the precalculation
periods = 24
latitude = 23.614328
longitude = 58.545284
timezone = 'Asia/Muscat'
collector_tilt = 10
collector_azimuth = 180
x = 0.9
a_1 = -0.00159
a_2 = 0.0000977
eta_0 = 0.816
c_1 = 0.0622
c_2 = 0.00023
temp_collector_inlet = 435
temp_collector_outlet = 500
# plot showing the difference between a constant efficiency without considering
# cleaniness for the heat of the collector during a day
data_precalc = csp_precalc(dataframe, periods,
latitude, longitude, timezone,
collector_tilt, collector_azimuth, x, a_1, a_2,
eta_0, c_1, c_2,
temp_collector_inlet, temp_collector_outlet,
date_col='Datum', temp_amb_col='t_amb')
heat_calc = data_precalc['collector_heat']
irradiance_on_collector = data_precalc['collector_irradiance'] / (x**1.5)
heat_compare = irradiance_on_collector * eta_0
t = list(range(1, 25))
fig, ax = plt.subplots()
ax.plot(t, heat_calc, label='CSP precalculation')
ax.plot(t, heat_compare, label='constant efficiency')
ax.set(xlabel='time (h)', ylabel='Q_coll',
title='Heat of the collector')
ax.grid()
ax.legend()
plt.savefig('compare_precalculations.png')
# plot showing the difference between a constant efficiency and the efficiency
# depending on the ambient temperature for the same irradiance and hour of the
# day
df_result = pd.DataFrame()
for i in range(len(temp_amb_series)):
df_temp_amb_series['t_amb'] = temp_amb_series[i]
data_precalc_temp_amb = csp_precalc(
df_temp_amb_series, 1,
latitude, longitude, timezone,
collector_tilt, collector_azimuth, x, a_1, a_2,
eta_0, c_1, c_2,
temp_collector_inlet, temp_collector_outlet,
date_col='Datum', temp_amb_col='t_amb')
df_result = df_result.append(data_precalc_temp_amb, ignore_index=True)
fig, ax = plt.subplots()
ax.plot(temp_amb_series, df_result['eta_c'],
label='efficiency depending on ambient temperature')
ax.plot(temp_amb_series, [eta_0] * 24, label='constant efficiency')
ax.set_ylim(0, 1)
ax.set(xlabel='ambient temperature', ylabel='eta_collector',
title='collectors efficiency')
ax.grid()
ax.legend()
plt.show()
plt.savefig('compare_temp_dependency.png')
|
24,916 | aa9252fc5440cae805ac4ef057c96d2bf254e117 | import twitter
def build_corpus(username, api):
    """
    Build the corpus using a twitter name
    :param username: Twitter screen name whose timeline is fetched.
    :param api: authenticated ``twitter.Api`` instance.
    :return: (String) corpus of training data -- all tweet texts joined by
        single spaces.
    """
    print('getting tweets for user: ', username)
    statuses = api.GetUserTimeline(screen_name=username, count=200)
    return ' '.join(status.text for status in statuses)
def get_keys():
    """
    Get the Keys for Twitter account authentication from the local ``keys``
    file (one credential per line).
    :return: (Tuple) consumer_api_key, consumer_secret_key, access_token, access_secret_token
    """
    with open('keys', 'r') as handle:
        return tuple(entry.strip('\n') for entry in handle)
def get_api():
    """
    Get the twitter API
    :return: Twitter API object, authenticated with the four credentials
        read from the local ``keys`` file by :func:`get_keys`
    """
    consumer_api_key, consumer_secret_key, access_token, access_secret_token = get_keys()
    return twitter.Api(consumer_key=consumer_api_key,
                       consumer_secret=consumer_secret_key,
                       access_token_key=access_token,
                       access_token_secret=access_secret_token)
|
24,917 | 941482b17ab2265993dc76e65e7e1d494c3e38e1 | from app import ma
from app.models.father import Father
class FatherSchema(ma.ModelSchema):
class Meta:
model = Father
dump_only = ('id',)
exclude = ('sons',)
|
24,918 | feeea0d29ce8ecb069b7b50d3ae5787a8d209936 | import libfoolang
inputs = [
('complete case 1', "def a"),
('complete case 2', "def a (b)"),
('complete case 3', "def a (b) {c}"),
('complete case 4', "var a"),
('complete case 5', "var a (b)"),
('complete case 6', "var a (b, c, d)"),
('complete case 7', ". a (b)"),
('complete case 8', ". a (b) {c}"),
('complete case 9', ", a b"),
('complete case 10', "(a) , b c"),
# The def and var rules check that incomplete results are produced
# regarding the presence of several cut parsers.
('incomplete case 1', "def"),
('incomplete case 2', "def a (b"),
('incomplete case 3', "def a (b) {c"),
('incomplete case 4', "def a ("),
('incomplete case 5', "def a (b) {"),
('incomplete case 6', "def a ( {"),
('incomplete case 7', "def a (b {c"),
('incomplete case 8', "var"),
('incomplete case 9', "var a ("),
('incomplete case 10', "var a ()"),
('incomplete case 11', "var a (b, c, d"),
# The dot rule checks that an incomplete result is produced if only the
# optional part can set the no_backtracing variable.
('incomplete case 12', ". a (b"),
('incomplete case 13', ". a (b) {"),
('incomplete case 14', ". a ( {"),
# The comma rule is similar to the dot one but the optional part is at the
# beginning of the rule.
('incomplete case 15', ", b"),
('incomplete case 16', "(a) , b"),
('incomplete case 17', "(a , b"),
]
ctx = libfoolang.AnalysisContext()
for name, text in inputs:
print(f"=== {name}: {text} ===")
print()
u = ctx.get_from_buffer("buffer", buffer=text)
for d in u.diagnostics:
print(d)
u.root.dump()
print()
|
24,919 | cfbe99b2346229c1b56dd56059682de11989b1ea | __author__ = 'atma6951'
__date__ = '3/20/2016'
from StyleFunctions import *
if __name__ == '__main__':
portalURL = r"https://www.arcgis.com/"
username = 'username'
password = 'password'
referer = r'https://www.arcgis.com'
folderPath = r'E:\GIS_Data\file_formats\VTPK\vectorStyle_cleaned'
itemID = '85d10a827d98487abc2432e725f41140' #Provide id of item that has to be updated
url2service = r'https://basemaps.arcgis.com/v1/arcgis/rest/services/World_Basemap/VectorTileServer'
token = getToken(portalURL, username,password,referer)
update_fonts = False
if (token):
itemExists = findItem(itemID, portalURL, token)
if (itemExists):
print('Update start time: ' + getTimeStamp())
if update_fonts:
deleteSucceeded = deleteResources_fromPortalItem(itemID, portalURL, token, username)
if (deleteSucceeded):
addResources_styles(portalURL, username, token, itemID, folderPath + r'\resources\styles', url2service)
addResources_sprites(portalURL, username, token, itemID, folderPath + r'\resources\sprites')
addResources_info(portalURL, username, token, itemID, folderPath + r'\resources\info')
addResources_fonts(portalURL, username, token, itemID, folderPath + r'\resources\fonts')
print('Update end time: ' + getTimeStamp())
else:
print('Delete operation failed. No resources were updated')
else:
#Dont update fonts code
deleteSucceeded = deleteResources_fromPortalItem(itemID, portalURL,
token, username, update_fonts = update_fonts)
if deleteSucceeded:
addResources_styles(portalURL, username, token, itemID, folderPath + r'\resources\styles', url2service)
addResources_sprites(portalURL, username, token, itemID, folderPath + r'\resources\sprites')
print("Update end time: " + getTimeStamp())
else:
print("Delete operation failed. No resources were updated")
else:
print("Item cannot be added to portal. Halting script")
else:
print("Token not generated. Halting script") |
24,920 | a498fe1468eb8b8943e336dbf8a90a9b9afe7853 | """
Print the following pyramid of increasing numbers.
A. User inputs the height.
B. User inputs the last number printed.
1
23
456
78910
1112131415
"""
def build_pyramid(height):
    """Build the pyramid of increasing numbers as a list of row strings.

    Row ``i`` (1-based) contains the next ``i`` consecutive integers, so for
    height 5 the rows are: 1 / 23 / 456 / 78910 / 1112131415.

    Fixes over the original inline loop: it printed only ``i`` numbers on
    row ``i`` (leaving the first row blank), and it hard-coded a stop at the
    number 15 instead of deriving everything from the requested height.

    :param height: number of rows (non-negative int).
    :return: list of row strings, one entry per row.
    """
    rows = []
    next_number = 1
    for row_length in range(1, height + 1):
        rows.append(''.join(str(n) for n in range(next_number, next_number + row_length)))
        next_number += row_length
    return rows


if __name__ == "__main__":
    user_input = int(input("Enter the height: ").strip())
    for row in build_pyramid(user_input):
        print(row)
|
24,921 | 70ce349c87fba7e8cebd60f12b37cf0578a7991e | from django.shortcuts import render
from django.views.generic.dates import (ArchiveIndexView, DateDetailView,
YearArchiveView, MonthArchiveView,
DayArchiveView)
from django.views.generic.list import ListView
from .models import Entry, Category
class CategoryListView(ListView):
model = Category
context_object_name = "categories"
def category_detail(request, slug):
cat = Category.objects.get(slug=slug)
entries = cat.entries.all()
return render(request, 'entry_archive.html', {'entries': entries})
class EntryArchiveIndexView(ArchiveIndexView):
queryset = Entry.live.all()
date_field = "pub_date"
context_object_name = "entries"
template_name = 'entry_archive.html'
class EntryDateDetailView(DateDetailView):
model = Entry
context_object_name = "entry"
date_field = "pub_date"
month_format = "%m" # default is %b
template_name = 'entry_detail.html'
class EntryYearArchiveView(YearArchiveView):
model = Entry
make_object_list = True
context_object_name = "entries"
date_field = "pub_date"
template_name = 'entry_archive.html'
allow_future = True
class EntryMonthArchiveView(MonthArchiveView):
model = Entry
context_object_name = "entries"
date_field = "pub_date"
month_format = "%m"
template_name = 'entry_archive.html'
class EntryDayArchiveView(DayArchiveView):
model = Entry
context_object_name = "entries"
date_field = "pub_date"
month_format = "%m"
template_name = 'entry_detail.html'
|
24,922 | 3476bf48b4c21b8a96b2b6ccff32c5c2f6b2a139 | #ROCK PAPER SCISSOR
inp1 = 1
inp2 = 1
s1 = 0
s2 = 0
while inp1 and inp2:
inp1 = input('player 1:enter choice (R/P/S)').upper()
inp2 = input('player 2:enter choice (R/P/S)').upper()
if (inp1 == 'R' and inp2 == 'P') or (inp1 == 'P' and inp2 == 'S') or (inp1 == 'S' and inp2 == 'R'):
s2 += 1
print('player 2 wins')
elif (inp2 == 'R' and inp1 == 'P') or (inp2 == 'P' and inp1 == 'S') or (inp2 == 'S' and inp1 == 'R'):
s1 += 1
print('player 1 wins')
else:
print('invalid input... enter correct choice')
print(f'Player 1 score: {s1}')
print(f'Player 2 score: {s2}')
|
24,923 | f7a2a67818ea85cf87abf4f46234dfbc73618e36 | #run code to check for 'problem entries' - anomalies between the number of students who enrolled vs the number of students who actually attended classes (engaged)
# output should be 3
num_problem_students = 0
for enrollment in enrollments:
student = enrollment['account_key']
if student not in unique_engagement_students and enrollment['join_date'] != enrollment['cancel_date']:
num_problem_students += 1
print(num_problem_students)
|
24,924 | 1c78f54bf2ab2eeab900487c29ad18a1605afb07 | from django.conf.urls import patterns, include, url
from django.conf import settings
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
url(r'^addCom$', 'gesualdo.views.messageprocessing.addComment', name='addComment'),
url(r'^remCom$', 'gesualdo.views.messageprocessing.refuseComment', name='refuseComment'),
url(r'^validateCom$', 'gesualdo.views.messageprocessing.validateComment', name='validateComment'),
url(r'^archiveCom', 'gesualdo.views.messageprocessing.archiveComment', name='archiveComment'),
) |
24,925 | 756467dd1b71e3cda37cb5a65b2471530fa1c2bf | import time, datetime, winsound
print("Welcome to the Pomodoro Effect!!!")
pomodoros= 0
n= "-" *25
def sleep_(time_):
time.sleep(time_)
fname = "C:/Users/User/Documents/$+pythonized/projects/media_dump/fit_music.wav"
winsound.PlaySound(fname, winsound.SND_FILENAME)
def work():
print(f"\nStart working (30mins):", datetime.datetime.now().strftime("%H:%M:%S"))
sleep_(60*30)
def short_break():
print(f"Time to take a short break (10mins): ", datetime.datetime.now().strftime("%H:%M:%S"))
sleep_(60*10)
def long_break():
    """Announce the long Pomodoro break and pause for 30 minutes."""
    print(f"Time to take a long break (30mins): ", datetime.datetime.now().strftime("%H:%M:%S"))
    # Bug fix: the message promises 30 minutes but the original slept only
    # 60*10 seconds (copy-paste from short_break).
    sleep_(60*30)
while True:
for i in range(3): #Three short breaks
work()
short_break()
work()
long_break()
pomodoros+=1
print(f"\n{n}\n\tPomodoros done: {pomodoros}\n{n}")
|
24,926 | 1e6f3f8b5d87850b2bb64792ec53dbff4c1713af | from Crypto.PublicKey import RSA
from Crypto.Hash import MD5
from Crypto.Signature import pkcs1_15
key = RSA.generate(2048)
def generateKey():
    '''
    Prepare a private-key file, a public-key file and a data file.

    Writes ``private_key.pem`` and ``public_key.pem`` from the module-level
    ``key``, and appends the task-description text to ``data.txt`` (append
    mode means repeated runs duplicate the text).
    '''
    private_key = key.export_key()
    public_key = key.publickey().export_key()
    # Runtime payload (Chinese task description) that will later be signed;
    # must not be altered.
    data = '''利用Python实现RSA数字签名的产生和验证过程。
任务1:准备一个私钥文件,一个公钥文件,一个数据文件;
任务2:定义一个函数,能够使用指定的私钥对数据文件进行签名,并将签名结果输出到文件返回;
任务3:定义一个函数,能够使用指定的公钥对任务2中的签名文件进行验证,返回验证结果;
任务4:利用任务1中的文件对任务2和3中的函数进行测试。'''
    with open("private_key.pem", "wb") as prifile,open("public_key.pem", "wb") as pubfile,open("data.txt","a") as datafile:
        prifile.write(private_key)
        pubfile.write(public_key)
        datafile.write(data)
# 读取data的内容
with open("data.txt","r") as datafile:
data = datafile.read()
print(data)
def signAturer(private_key,data):
    '''
    Signing function: sign *data* with *private_key* and write the raw
    signature to ``sign_results.txt``.

    The digest algorithm is MD5, so verification must also use MD5.

    :param private_key: RSA private key object used for signing.
    :param data: str payload to sign.
    :return: the raw signature bytes (also written to the file).
    '''
    # Hash the message; MD5 here must match the algorithm used by verifier().
    digest = MD5.new(data.encode('utf-8'))
    # Sign the digest with the private key.
    signature = pkcs1_15.new(private_key).sign(digest)
    # Context manager guarantees the file is closed even if the write fails;
    # the original returned an already-closed file handle, which was useless
    # to callers -- return the signature bytes instead.
    with open("sign_results.txt","wb") as sig_results:
        sig_results.write(signature)
    return signature
def verifier(public_key,data,signature):
    '''
    Verification function: check *signature* over *data* with *public_key*
    and print whether it is valid.
    '''
    # Must use the same digest algorithm (MD5) as signAturer().
    digest = MD5.new(data.encode('utf-8'))
    try:
        pkcs1_15.new(public_key).verify(digest, signature)
        print("验证成功!!!")
    except (ValueError, TypeError):
        # pkcs1_15.verify signals an invalid signature with ValueError; the
        # original bare `except` also swallowed unrelated programming errors.
        print("签名无效!!!")
def test():
    '''
    Exercise the files produced by generateKey() with the signing and
    verification helpers (tasks 2 and 3).
    '''
    with open('private_key.pem') as prifile,open('data.txt') as datafile:
        private_key = RSA.import_key(prifile.read())
        data = datafile.read()
        # Produce sign_results.txt, consumed below.
        signAturer(private_key,data)
    with open('public_key.pem') as pubfile,open('data.txt') as datafile,open('sign_results.txt','rb') as sigfile:
        public_key = RSA.import_key(pubfile.read())
        data = datafile.read()
        signature = sigfile.read()
        verifier(public_key,data,signature)
if __name__ == "__main__":
test()
|
24,927 | 8aefc88c973f392460ee2694f7fa934a32123ef1 | #!/usr/bin/env python
# coding: utf-8
# ## Taxi Ride Fare Prediction Using Kubeflow and Feast
#
# * Predict taxi ride fares using Feast and Kubeflow
# Setup the notebook
# - Install `feast` with pip.
# - Activate user service account with credentials JSON.
# - Hacks to retrieve essential information for deployments and serving.
#
# **NOTE**: This code block might hangs for a long time.
# fairing:include-cell
import fairing
import sys
import importlib
import uuid
import logging
import os
import json
import requests
import pandas as pd
import numpy as np
from retrying import retry
from feast.sdk.resources.entity import Entity
from feast.sdk.resources.storage import Storage
from feast.sdk.resources.feature import Feature, Datastore, ValueType
from feast.sdk.resources.feature_set import FeatureSet, FileType
import feast.specs.FeatureSpec_pb2 as feature_pb
from feast.sdk.importer import Importer
from feast.sdk.client import Client
# ## Load raw data
# ## Extract more features
# ## Register entity and features
# ## Define a Feature Set for this project
# ## Retrieve a Training Set from Feast
# ## Visualize statistics with TFDV
# ## Train Linear Model
# fairing:include-cell
class TaxiRideModel(object):
    """Linear taxi-ride fare model trained with least squares and served
    with features fetched from Feast.

    ``train`` fits the coefficients, ``save_model`` persists them together
    with the Feast endpoints, and ``predict`` lazily restores that state to
    score a single ride id.
    """
    # Feature ids requested from Feast serving at prediction time; the order
    # must line up with the learned coefficient vector ``self.m``.
    SERVING_FEATURE_SET = [
        'taxi_ride.passenger_count',
        'taxi_ride.distance_haversine',
        'taxi_ride.distance_dummy_manhattan',
        'taxi_ride.direction',
        'taxi_ride.month',
        'taxi_ride.day_of_month',
        'taxi_ride.day_of_week',
        'taxi_ride.hour']
    def __init__(self):
        # Coefficient vector and intercept; filled in by train() or restored
        # from 'simple_model.dat' on the first predict() call.
        self.m = None
        self.b = None
        # Feast client and serving FeatureSet, also created lazily.
        self.fs = None
        self.serving_fs = None
        logging.basicConfig(level=logging.INFO,
                            format=('%(levelname)s|%(asctime)s'
                                    '|%(pathname)s|%(lineno)d| %(message)s'),
                            datefmt='%Y-%m-%dT%H:%M:%S',
                            )
        logging.getLogger().setLevel(logging.INFO)
    # Train model
    def train(self, training_df):
        """Fit the linear model on *training_df* and return (m, b).

        NOTE(review): TRAINING_FEATURES_SET and the 'fare_amount' target
        column come from notebook-level globals not defined in this class --
        confirm they exist at call time.
        """
        np.set_printoptions(precision=3)
        train_data = training_df[[x.split('.')[1] for x in TRAINING_FEATURES_SET]].to_numpy()
        # Overwrite the last feature column with 1s so lstsq fits an
        # intercept term as part of the same solve.
        train_data[:, len(train_data[0]) - 1] = 1
        Y = training_df['fare_amount'].to_numpy()
        x = np.linalg.lstsq(train_data, Y, rcond=0)[0]
        # Split the solution into slope coefficients and the intercept.
        m, b = x[:len(train_data[0])-1], x[len(train_data[0])-1]
        self.m = m
        self.b = b
        return m,b
    def predict(self, feature_id, feature_names):
        """Score one ride: fetch its serving features from Feast and apply
        the linear model.

        :param feature_id: entity key of the ride to score.
        :param feature_names: only logged here; presumably part of the
            serving-framework calling convention -- confirm with the caller.
        :return: single-element list with the predicted fare.
        """
        logging.info('feature_id = %s', feature_id)
        logging.info('feature_names = %s', feature_names)
        # Lazy initialisation: restore model coefficients and the Feast
        # endpoints from the file written by save_model().
        if any([i is None for i in [self.m, self.b, self.fs, self.serving_fs]]):
            with open('simple_model.dat', 'r') as f:
                model = json.load(f)
            self.m = np.array(model.get('m', []))
            self.b = float(model.get('b', 0))
            _FEAST_CORE_URL = model['FEAST_CORE_URL']
            _FEAST_SERVING_URL = model['FEAST_SERVING_URL']
            _ENTITY_ID = model['ENTITY_ID']
            logging.info('FEAST_CORE_URL: %s', _FEAST_CORE_URL)
            logging.info('FEAST_SERVING_URL: %s', _FEAST_SERVING_URL)
            logging.info('ENTITY_ID: %s', _ENTITY_ID)
            logging.info('FEATURES_SET: %s', self.SERVING_FEATURE_SET)
            self.fs = Client(core_url=_FEAST_CORE_URL,
                             serving_url=_FEAST_SERVING_URL,
                             verbose=True)
            self.serving_fs = FeatureSet(
                entity=_ENTITY_ID,
                features=self.SERVING_FEATURE_SET)
        features = self.fs.get_serving_data(
            self.serving_fs,
            entity_keys=[feature_id])
        # First column is the entity key; the remaining columns are the
        # feature values in SERVING_FEATURE_SET order.
        X = features.to_numpy()[0][1:]
        logging.info('X: %s', str(X))
        return [sum(self.m * X) + self.b]
    def save_model(self, model_path):
        """Save the model to a json file."""
        # NOTE(review): FEAST_CORE_URL, FEAST_SERVING_URL and ENTITY_ID are
        # notebook-level globals -- confirm they are set before saving.
        MODEL_FILE = 'simple_model.dat'  # unused here; predict() hard-codes the name
        model = {
            'm': self.m.tolist(),
            'b': self.b,
            'FEAST_CORE_URL': FEAST_CORE_URL,
            'FEAST_SERVING_URL': FEAST_SERVING_URL,
            'ENTITY_ID': ENTITY_ID,
        }
        logging.info('Saving model to %s', model_path)
        with open(model_path, 'w+') as f:
            json.dump(model, f)
# ## Save the model
# ## Local Prediction
# ## Use fairing to build the docker image
#
# * This uses the append builder to rapidly build docker images
# ## Deploy with Kubeflow
# ## Call the prediction endpoint
if __name__ == "__main__":
import fire
import logging
logging.basicConfig(format='%(message)s')
logging.getLogger().setLevel(logging.INFO)
fire.Fire(TaxiRideModel)
|
24,928 | 65a7e0c3c5342e9320bd65d66b4c0bc2d491d3cb |
import math
def first_func(x, y):
    """Evaluate x^2 - x - 2xy - y^2 + y; zero when (x, y) satisfies the curve."""
    return x * x - x - 2 * x * y - y * y + y
def second_func(x, d):
    """Evaluate 2x^2 - 2x - d^2 + d; zero exactly at x = root_generator(d)."""
    return 2 * x * x - 2 * x - d * d + d
def root_generator(d):
    """Positive root x of 2x^2 - 2x - d^2 + d = 0 (quadratic formula)."""
    discriminant = 1 + 2 * d * (d - 1)
    return (1 + math.sqrt(discriminant)) / 2
##print first_func(15,6)
##print "ahaha"
##for total in range(1, 21,1):
## tmptotal=total*1
total=10**12
total-=1000
max_counter=100000000
###total+=max_counter
step=1
itotal=total
while (itotal<total+max_counter):
n=root_generator(itotal)
n1=int(n)
if (n1==n):
x=n1
y=itotal-x
##print second_func(x,itotal)
#print 'ffff:%20.10f '%(n1)
n2=second_func(x,itotal)
if (n2==0):
print 'blue:%20.0f total:%20.0f'%(n1,itotal)
## if (first_func(x,y)==0):
## print '%20.10f '%(n1)
# if (icount>1000000):
# print icount+jcount
# jcount+=icount
# icount=0
itotal+=step
print itotal
print icount
|
24,929 | f5bc0f9ccb064510cd83f4da69a8d074e8311201 | class Colors:
    # ANSI SGR escape sequences for terminal text styling.
    # Standard foreground colors (SGR codes 30-37).
    BLACK = '\033[30m'
    RED = '\033[31m'
    GREEN = '\033[32m'
    YELLOW = '\033[33m'
    BLUE = '\033[34m'
    MAGENTA = '\033[35m'
    CYAN = '\033[36m'
    WHITE = '\033[37m'
    UNDERLINE = '\033[4m'  # SGR 4: underline on
    RESET = '\033[0m'  # SGR 0: reset all attributes
    # Bright / high-intensity foreground colors (SGR codes 90-97).
    BRIGHT_BLACK = '\033[90m'
    BRIGHT_RED = '\033[91m'
    BRIGHT_GREEN = '\033[92m'
    BRIGHT_YELLOW = '\033[93m'
    BRIGHT_BLUE = '\033[94m'
    BRIGHT_MAGENTA = '\033[95m'
    BRIGHT_CYAN = '\033[96m'
    BRIGHT_WHITE = '\033[97m'
    BRIGHT_END = '\033[0m'  # same code as RESET; kept for backward compatibility
24,930 | 58bc9039885cc8884a338043ffd435081c9740ba | seconds = int(input("Введите количество секунд "))
hours = seconds // 3600
seconds = seconds - (hours * 3600)
minutes = seconds // 60
seconds = seconds - (minutes * 60)
print(f"Введеное вами время в формате ЧЧ:ММ:СС составляет {hours}:{minutes}:{seconds}")
|
24,931 | 8c7636a6aca76ebf1793ab859a684cbf5e270468 | from typing import Dict, Optional
import click
from covid_shared import ihme_deps, paths
from loguru import logger
from covid_model_seiir_pipeline.lib import cli_tools
from covid_model_seiir_pipeline.pipeline.forecasting.specification import ForecastSpecification
from covid_model_seiir_pipeline.pipeline.forecasting.data import ForecastDataInterface
from covid_model_seiir_pipeline.pipeline.forecasting.workflow import ForecastWorkflow
def do_forecast(run_metadata: cli_tools.RunMetadata,
specification: ForecastSpecification,
output_root: Optional[str], mark_best: bool, production_tag: str,
preprocess_only: bool,
with_debugger: bool,
input_versions: Dict[str, cli_tools.VersionInfo]) -> ForecastSpecification:
specification, run_metadata = cli_tools.resolve_version_info(specification, run_metadata, input_versions)
output_root = cli_tools.get_output_root(output_root, specification.data.output_root)
cli_tools.setup_directory_structure(output_root, with_production=True)
run_directory = cli_tools.make_run_directory(output_root)
specification.data.output_root = str(run_directory)
run_metadata['output_path'] = str(run_directory)
run_metadata['forecast_specification'] = specification.to_dict()
cli_tools.configure_logging_to_files(run_directory)
# noinspection PyTypeChecker
main = cli_tools.monitor_application(forecast_main,
logger, with_debugger)
app_metadata, _ = main(specification, preprocess_only)
cli_tools.finish_application(run_metadata, app_metadata,
run_directory, mark_best, production_tag)
return specification
def forecast_main(app_metadata: cli_tools.Metadata,
specification: ForecastSpecification,
preprocess_only: bool):
logger.info(f'Starting beta forecast for version {specification.data.output_root}.')
data_interface = ForecastDataInterface.from_specification(specification)
# Check scenario covariates the same as regression covariates and that
# covariate data versions match.
# data_interface.check_covariates(specification.scenarios)
data_interface.make_dirs(scenario=list(specification.scenarios))
data_interface.save_specification(specification)
if not preprocess_only:
forecast_wf = ForecastWorkflow(specification.data.output_root,
specification.workflow)
n_draws = data_interface.get_n_draws()
forecast_wf.attach_tasks(n_draws=n_draws,
scenarios=specification.scenarios)
try:
forecast_wf.run()
except ihme_deps.WorkflowAlreadyComplete:
logger.info('Workflow already complete')
logger.info(f'Forecast version {specification.data.output_root} complete.')
@click.command()
@cli_tools.pass_run_metadata()
@cli_tools.with_specification(ForecastSpecification)
@cli_tools.add_output_options(paths.SEIR_FORECAST_OUTPUTS)
@cli_tools.add_preprocess_only
@cli_tools.add_verbose_and_with_debugger
@cli_tools.with_version(paths.SEIR_REGRESSION_OUTPUTS)
def forecast(run_metadata,
specification,
output_root, mark_best, production_tag,
preprocess_only,
verbose, with_debugger,
**input_versions):
"""Perform beta forecast for a set of scenarios on a regression."""
cli_tools.configure_logging_to_terminal(verbose)
do_forecast(
run_metadata=run_metadata,
specification=specification,
output_root=output_root,
mark_best=mark_best,
production_tag=production_tag,
preprocess_only=preprocess_only,
with_debugger=with_debugger,
input_versions=input_versions,
)
logger.info('**Done**')
|
24,932 | fba1a31fa5f9c62aad6b44cb7aadefaa2f2f8bab | from output.models.nist_data.atomic.g_year.schema_instance.nistschema_sv_iv_atomic_g_year_max_inclusive_1_xsd.nistschema_sv_iv_atomic_g_year_max_inclusive_1 import NistschemaSvIvAtomicGYearMaxInclusive1
__all__ = [
"NistschemaSvIvAtomicGYearMaxInclusive1",
]
|
24,933 | 0d728550398d18a6bbb2bdadb6d9a53685587283 | from collections import deque
def bfs(start, target, words, visited):
    """Breadth-first search over one-letter transformations.

    Records in *visited* the number of steps needed to reach each word from
    *start*; stops expanding once *target* is dequeued.
    """
    queue = deque([start])
    while queue:
        current = queue.popleft()
        if current == target:
            break
        for candidate in words:
            # Count positions where the two equal-length words disagree.
            mismatches = sum(1 for a, b in zip(candidate, current) if a != b)
            if mismatches == 1 and visited[candidate] == 0:
                visited[candidate] = visited[current] + 1
                queue.append(candidate)
def solution(begin, target, words):
    """Minimum number of one-letter transformations from *begin* to *target*.

    :param begin: start word.
    :param target: goal word.
    :param words: allowed intermediate (and final) words.
    :return: step count, or 0 when *target* is unreachable or not in *words*.
    """
    visited = {word: 0 for word in words}
    visited[begin] = 0
    bfs(begin, target, words, visited)
    # Unreachable targets keep distance 0 and missing targets default to 0;
    # this replaces the original bare try/except, which also masked any
    # genuine error raised inside the lookup.
    return visited.get(target, 0)
24,934 | ca94e95da3402452c00938a1ab31b2e8cf659ef4 | {
'name': "FLSP Purchase Container",
'summary': """
To manage the overseas container """,
'description': """
Customizations performed:
- Container
- Receipt: new field container
prevents the receipt to be completed.
""",
'author': "Alexandre Sousa",
'website': "http://www.smartrendmfg.com",
# Categories can be used to filter modules in modules listing
'category': 'Other',
'version': '0.1',
# any module necessary for this one to work correctly
'depends': ['base', 'stock', 'purchase', 'mail', 'purchase_stock'],
# always loaded
'data': [
'security/security.xml',
'security/ir.model.access.csv',
'views/flsp_purchase_container.xml',
'views/flsp_purchase_container_stock_picking.xml',
'wizard/flsp_purchase_container_stock_picking_wiz.xml',
'wizard/flsp_purchase_container_po_wiz.xml',
'wizard/flsp_purchase_container_po_line_wiz.xml',
'views/stock_move.xml',
],
}
|
24,935 | 78ff9497dcc7e87be2fa695e874ff2857275b80d | a, b = map(int, input().split())
# Rewind the clock by 45 minutes, wrapping around midnight.  Python's %
# always yields a value in [0, 1440), which reproduces the original
# two-step borrow (minutes from hours, then hours from the day).
total_minutes = (a * 60 + b - 45) % (24 * 60)
a, b = divmod(total_minutes, 60)
print(f'{a} {b}')
|
24,936 | 4566994c74c245243963832e571cd8f960c86809 | #!/bin/python
import argparse
import json
def extract(json, param, value):
    """Keep only the features whose properties[param] stringifies to *value*.

    Mutates *json* in place, prints how many features matched, and returns
    the same object.  (The parameter name shadows the ``json`` module, but
    it is part of the public interface and therefore kept.)
    """
    kept = []
    for feature in json["features"]:
        props = feature["properties"]
        if param in props and str(props[param]) == str(value):
            kept.append(feature)
    json["features"] = kept
    print(len(json["features"]),"feature with",param,"=",value,"extracted")
    return json
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input file path string")
parser.add_argument("output", help="output file path string")
parser.add_argument("property", help="property name to look for")
parser.add_argument("value", help="property value to extract")
args = parser.parse_args()
print("extracting features from",args.input)
jsondata = ""
with open(args.input) as file:
jsondata = json.load(file)
jsondata = extract(jsondata, args.property, args.value)
with open(args.output,"w") as outfile:
outfile.write(json.dumps(jsondata)) |
24,937 | 1ccdedc32c573dc0f6cd39cddd6e52bfd9b0c4bd | import numpy as np
import torch
import torch.nn.init as init
import cv2
from torch.autograd import Variable
from .m_global import dtype
import os
import fnmatch
def weights_init_constant(m, std):
classname = m.__class__.__name__
# print classname
if classname.find('Conv') != -1:
m.weight.data.normal_(mean = 0.0, std = std)
if m.bias is not None:
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(0.)#zero_()
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, std)
m.bias.data.zero_()
def weights_init_xavier(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
#std = np.sqrt(2./(m.kernel_size[0]*m.kernel_size[1]*m.out_channels))
#m.weight.data.normal_(0.0, std)
#m.bias.data.zero_()
init.xavier_normal(m.weight.data)
if m.bias is not None:
init.constant(m.bias.data, 0.)
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
m.bias.data.fill_(0.0)
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.001)
m.bias.data.zero_()
def weights_init_msra(m):
classname = m.__class__.__name__
# print classname
if classname.find('Conv') != -1:
std = np.sqrt(2. / (m.kernel_size[0] * m.kernel_size[1] * m.in_channels))
# init.kaiming_uniform(m.weight.data, mode='fan_in')
m.weight.data.normal_(mean=0.0, std=std)
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
#print m.weight.data.numpy()
m.weight.data.fill_(1.)
#print m.weight.data.numpy()
m.bias.data.fill_(0.)#zero_()
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.001)
m.bias.data.zero_()
def weights_init_He_normal(m):
classname = m.__class__.__name__
# print classname
if classname.find('Transpose') != -1:
m.weight.data.normal_(0.0, 0.001)
if not m.bias is None:
m.bias.data.zero_()
elif classname.find('Conv') != -1:
# std = np.sqrt(2. / (m.kernel_size[0] * m.kernel_size[1] * m.out_channels))
# m.weight.data.normal_(0.0, std)
init.kaiming_normal(m.weight.data, a=0, mode='fan_out')
if not m.bias is None:
m.bias.data.zero_()
elif classname.find('BatchNorm') != -1:
m.weight.data.fill_(1.)
if not m.bias is None:
m.bias.data.zero_()
elif classname.find('Linear') != -1:
m.weight.data.normal_(0.0, 0.001)
if not m.bias is None:
m.bias.data.zero_()
def cal_psnr(im1, im2):
    """Peak signal-to-noise ratio (dB) between two images on a 0-255 scale.

    :param im1: first image array (any numeric dtype).
    :param im2: second image array, same shape as *im1*.
    :return: PSNR in decibels; the +1e-5 keeps log10 finite when the images
        are identical (mse == 0).
    """
    # np.float was a deprecated alias removed in NumPy 1.24; the builtin
    # float (float64) is the drop-in replacement.
    mse = ((im1.astype(float) - im2.astype(float)) ** 2).mean()
    psnr = 10 * np.log10(255 ** 2 / (mse + 1e-5))
    return psnr
def cal_mse(im1, im2):
    """Mean squared error between two equal-shaped image arrays."""
    # np.float was a deprecated alias removed in NumPy 1.24; the builtin
    # float (float64) is the drop-in replacement.
    mse = ((im1.astype(float) - im2.astype(float)) ** 2).mean()
    return mse
def evaluate_detection_network(net, test_dataset, config, iterations):
psnr = []
for i_test_batch in range(0, len(test_dataset) // config.test_batchsize ):
test_batched = next(test_dataset)
input = Variable(torch.from_numpy(np.asarray(test_batched[0]))).type(dtype)
output = net(input)
output = np.clip((output.cpu().data.numpy()) * 255., 0, 255).astype(np.uint8)
label = np.clip(np.asarray(test_batched[1]) * 255., 0, 255).astype(np.uint8)
if i_test_batch == 0:
output_patch = 10
output_image = np.clip(input.data.cpu().numpy()[output_patch, 0, :, :] * 255, 0, 255).astype(np.uint8)
cv2.imwrite(config.test_folder + '/test_image_iter_' + str(iterations) + '_input.png', output_image)
output_image = output[output_patch, 0, :, :]
cv2.imwrite(config.test_folder + '/test_image_iter_'+str(iterations)+'_output.png', output_image)
output_image = label[output_patch, 0, :, :]
cv2.imwrite(config.test_folder + '/test_image_iter_' + str(iterations) + '_GT.png', output_image)
for i in range(0, len(label)):
# test_psnr = cal_psnr(output[i,], label[i,])
# psnr.append(test_psnr)
test_mse = cal_mse(output[i,], label[i,])
psnr.append(test_mse)
return psnr
def evaluate_detection_network_hdf5_PR(net, test_dataset, config):
recall = np.asarray([])
for i_test_batch in range(0, len(test_dataset) // config.batch_size):
sample_batched = next(test_dataset)
input = Variable(torch.from_numpy(sample_batched[0])).type(dtype)
binary_criteria = 0.5
output = net(input)
output = output.cpu().data.numpy()
output[output >= binary_criteria] = 1
output[output < binary_criteria] = 0
label = sample_batched[1]
output = np.reshape(output, (output.shape[0], output.shape[1] * output.shape[2] * output.shape[3]))
label = np.reshape(label, (label.shape[0], label.shape[1] * label.shape[2] * label.shape[3]))
true_positive = np.sum(output * label, axis=1)
# precision = true_positive / (np.sum(output, axis=1) + 1e-6)
recall_batch = (true_positive + 1e-6) / (np.sum(label, axis=1) + 1e-6)
recall = np.append(recall, recall_batch)
# recall[recall > 1] = 1
return recall
# helper functions.
def make_dir(dir):
    """Create directory *dir* (including parents) if it does not exist."""
    # exist_ok avoids the check-then-create race of the original
    # `if not os.path.exists(...)` guard.
    os.makedirs(dir, exist_ok=True)
def removeLineFeed(line):
    """Return *line* truncated at the first newline character."""
    return line.split('\n', 1)[0]
def read_lmdb_list(file):
    """Read *file* and return its lines with trailing newlines removed."""
    # Inlines the removeLineFeed helper: keep only the text before the
    # first newline of each line.
    with open(file) as handle:
        return [entry.split('\n')[0] for entry in handle]
def parse_class(classname):
    """Split a class name of the form ``<xxxx>NN_sK`` into its parts.

    Returns (noise, scale) where *noise* is the text after the first four
    characters of the leading segment and *scale* is the int after the
    first character of the second segment.
    """
    noise_part, scale_part = classname.split('_')[:2]
    return noise_part[4:], int(scale_part[1:])
def sort_list(images):
    """Sort the *images* list in place, ignoring case."""
    images.sort(key=str.lower)
def worker_init_fn(worker_id):
np.random.seed((torch.initial_seed() + worker_id) % (2 ** 32))
def find_test_image_h5(test_dataset, config):
for i_test_batch in range(0, len(test_dataset) // config.test_batchsize):
test_batched = next(test_dataset)
label = np.asarray(test_batched[1])
label = np.reshape(label, (label.shape[0] * label.shape[1], label.shape[2] * label.shape[3]))
label_sum = np.sum(label, axis=1)
for i in range(label.shape[0]):
if label_sum[i] > 0:
return i_test_batch, i
#Helper functions.
def list_all_dir(path):
    """Return full paths of the immediate subdirectories of *path*."""
    candidates = (os.path.join(path, name) for name in os.listdir(path))
    return [entry for entry in candidates if os.path.isdir(entry)]
def list_all(path):
    """Return full paths of every entry directly inside *path*."""
    # Inlines the join_path helper: prefix each entry with *path*.
    return [os.path.join(path, name) for name in os.listdir(path)]
def findfiles(path, fnmatchex='*.*'):
    """Recursively collect files under *path* whose names match *fnmatchex*."""
    matches = []
    for root, _dirs, names in os.walk(path):
        matches.extend(os.path.join(root, name)
                       for name in fnmatch.filter(names, fnmatchex))
    return matches
def list_png_files(path):
return findfiles(path, '*.png')
def read_images(files):
result = []
for file in files:
if os.path.isdir(file):
result.append(file)
return result
def join_path(base_dir):
def sub_func(file):
return os.path.join(base_dir, file)
return sub_func
def load_image(image_file):
image = cv2.imread(image_file)
return image
def load_image_list(image_file_list):
images = list(map(cv2.imread, image_file_list))
return images
def RGB_TO_YCRCB(image_rgb):
image_yuv = cv2.cvtColor(image_rgb, cv2.COLOR_BGR2YCR_CB)
return image_yuv
def YCRCB_TO_RGB(image_yuv):
image_rgb = cv2.cvtColor(image_yuv, cv2.COLOR_YCR_CB2BGR)
return image_rgb
def RGB_TO_YCRCB_BATCH(images):
return list(map(RGB_TO_YCRCB, images))
def extractChannel(channel):
    """Return a function that slices index *channel* from an image's last axis."""
    return lambda image: image[:, :, channel]
def image_to_file(image, file):
    """Write *image* to path *file* via cv2.imwrite."""
    cv2.imwrite(file, image)
def extractChannel_batch(channel):
    """Return a function that extracts *channel* from every image in a list."""
    def apply_to_list(image_list):
        pick = extractChannel(channel)
        return [pick(image) for image in image_list]
    return apply_to_list
24,938 | cbbf3d31d741d607bbe67a04d2a56ed50525385f | # Copyright 2021 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""An abstract class for the Trainer for both CAIP and uCAIP."""
import abc
import datetime
import json
from typing import Any, Dict, List, Optional, Text, Union
from absl import logging
from google.cloud.aiplatform import gapic
from google.cloud.aiplatform_v1beta1.types.custom_job import CustomJob
from google.cloud.aiplatform_v1beta1.types.job_state import JobState
from googleapiclient import discovery
from tfx import types
from tfx.types import artifact_utils
from tfx.utils import telemetry_utils
from tfx.utils import version_utils
# Default container image being used for CAIP training jobs.
_TFX_IMAGE = 'gcr.io/tfx-oss-public/tfx:{}'.format(
    version_utils.get_image_version())

# Entrypoint of cloud AI platform training. The module comes from `tfx`
# package installation into a default location of 'python'.
_CONTAINER_COMMAND = ['python', '-m', 'tfx.scripts.run_executor']

# Regional uCAIP API endpoints have the form '<region>-aiplatform.googleapis.com'.
_UCAIP_ENDPOINT_SUFFIX = '-aiplatform.googleapis.com'

# Aliases for the terminal uCAIP job states used by UCAIPJobClient below.
_UCAIP_JOB_STATE_SUCCEEDED = JobState.JOB_STATE_SUCCEEDED
_UCAIP_JOB_STATE_FAILED = JobState.JOB_STATE_FAILED
_UCAIP_JOB_STATE_CANCELLED = JobState.JOB_STATE_CANCELLED
class AbstractJobClient(abc.ABC):
  """Abstract class interacting with CAIP CMLE job or uCAIP CustomJob."""

  # Subclasses fill these with their platform's terminal job-state values.
  JOB_STATES_COMPLETED = ()  # Job states for success, failure or cancellation
  JOB_STATES_FAILED = ()  # Job states for failure or cancellation

  def __init__(self):
    self.create_client()
    self._job_name = ''  # Assigned in self.launch_job()

  @abc.abstractmethod
  def create_client(self) -> None:
    """Creates the job client.

    Can also be used for recreating the job client (e.g. in the case of
    communication failure).

    Multiple job requests can be done in parallel if needed, by creating an
    instance of the class for each job. Note that one class instance should
    only be used for one job, as each instance stores variables (e.g. job_id)
    specific to each job.
    """
    pass

  @abc.abstractmethod
  def create_training_args(self, input_dict, output_dict, exec_properties,
                           executor_class_path, training_inputs,
                           job_id) -> Dict[Text, Any]:
    """Get training args for runner._launch_aip_training.

    The training args contain the inputs/outputs/exec_properties to the
    tfx.scripts.run_executor module.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Passthrough input dict for
        tfx.components.Trainer.executor.
      executor_class_path: class path for TFX core default trainer.
      training_inputs: Training input argument for AI Platform training job.
      job_id: Job ID for AI Platform Training job. If not supplied,
        system-determined unique ID is given.

    Returns:
      A dict containing the training arguments
    """
    pass

  @abc.abstractmethod
  def _create_job_spec(
      self,
      job_id: Text,
      training_input: Dict[Text, Any],
      job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:
    """Creates the job spec.

    Args:
      job_id: The job ID of the AI Platform training job.
      training_input: Training input argument for AI Platform training job.
      job_labels: The dict of labels that will be attached to this job.

    Returns:
      The job specification.
    """
    pass

  @abc.abstractmethod
  def launch_job(self,
                 job_id: Text,
                 parent: Text,
                 training_input: Dict[Text, Any],
                 job_labels: Optional[Dict[Text, Text]] = None) -> None:
    """Launches a long-running job.

    Args:
      job_id: The job ID of the AI Platform training job.
      parent: The project name in the form of 'projects/{project_id}'
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.
    """
    pass

  @abc.abstractmethod
  def get_job(self) -> Union[Dict[Text, Text], CustomJob]:
    """Gets the long-running job."""
    pass

  @abc.abstractmethod
  def get_job_state(
      self,
      response: Union[Dict[Text, Text], CustomJob]) -> Union[Text, JobState]:
    """Gets the state of the long-running job.

    Args:
      response: The response from get_job

    Returns:
      The job state.
    """
    pass

  def get_job_name(self) -> Text:
    """Gets the job name assigned by the most recent launch_job call."""
    return self._job_name
class CAIPJobClient(AbstractJobClient):
  """Class for interacting with CAIP CMLE job."""

  # Terminal job states reported by the CAIP REST API.
  JOB_STATES_COMPLETED = ('SUCCEEDED', 'FAILED', 'CANCELLED')
  JOB_STATES_FAILED = ('FAILED', 'CANCELLED')

  def create_client(self) -> None:
    """Creates the discovery job client.

    Can also be used for recreating the job client (e.g. in the case of
    communication failure).

    Multiple job requests can be done in parallel if needed, by creating an
    instance of the class for each job. Note that one class instance should
    only be used for one job, as each instance stores variables (e.g. job_id)
    specific to each job.
    """
    self._client = discovery.build('ml', 'v1')

  def create_training_args(self, input_dict: Dict[Text, List[types.Artifact]],
                           output_dict: Dict[Text, List[types.Artifact]],
                           exec_properties: Dict[Text, Any],
                           executor_class_path: Text,
                           training_inputs: Dict[Text, Any],
                           job_id: Optional[Text]) -> Dict[Text, Any]:
    """Get training args for runner._launch_aip_training.

    The training args contain the inputs/outputs/exec_properties to the
    tfx.scripts.run_executor module.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Passthrough input dict for
        tfx.components.Trainer.executor.
      executor_class_path: class path for TFX core default trainer.
      training_inputs: Training input argument for AI Platform training job.
        'pythonModule', 'pythonVersion' and 'runtimeVersion' will be inferred.
        For the full set of parameters, refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
      job_id: Job ID for AI Platform Training job. If not supplied,
        system-determined unique ID is given. Refer to
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#resource-job

    Returns:
      A dict containing the training arguments
    """
    # Copy so the caller's dict is not mutated by the pops/inserts below.
    training_inputs = training_inputs.copy()

    json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)
    logging.info('json_inputs=\'%s\'.', json_inputs)
    json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)
    logging.info('json_outputs=\'%s\'.', json_outputs)
    json_exec_properties = json.dumps(exec_properties, sort_keys=True)
    logging.info('json_exec_properties=\'%s\'.', json_exec_properties)

    # We use custom containers to launch training on AI Platform, which invokes
    # the specified image using the container's entrypoint. The default
    # entrypoint for TFX containers is to call scripts/run_executor.py. The
    # arguments below are passed to this run_executor entry to run the executor
    # specified in `executor_class_path`.
    container_command = _CONTAINER_COMMAND + [
        '--executor_class_path',
        executor_class_path,
        '--inputs',
        json_inputs,
        '--outputs',
        json_outputs,
        '--exec-properties',
        json_exec_properties,
    ]

    if not training_inputs.get('masterConfig'):
      training_inputs['masterConfig'] = {
          'imageUri': _TFX_IMAGE,
      }

    # Always use our own entrypoint instead of relying on container default.
    if 'containerCommand' in training_inputs['masterConfig']:
      logging.warn('Overriding custom value of containerCommand')
    training_inputs['masterConfig']['containerCommand'] = container_command

    # Pop project_id so AIP doesn't complain about an unexpected parameter.
    # It's been a stowaway in aip_args and has finally reached its destination.
    project = training_inputs.pop('project')

    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
      job_labels = telemetry_utils.get_labels_dict()

    # 'tfx_YYYYmmddHHMMSS' is the default job ID if not explicitly specified.
    job_id = job_id or 'tfx_{}'.format(
        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))

    training_args = {
        'job_id': job_id,
        'project': project,
        'training_input': training_inputs,
        'job_labels': job_labels
    }

    return training_args

  def _create_job_spec(
      self,
      job_id: Text,
      training_input: Dict[Text, Any],
      job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:
    """Creates the job spec.

    Args:
      job_id: The job ID of the AI Platform training job.
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.

    Returns:
      The job specification. See
      https://cloud.google.com/ai-platform/training/docs/reference/rest/v1/projects.jobs
    """
    job_spec = {
        'jobId': job_id,
        'trainingInput': training_input,
        'labels': job_labels,
    }
    return job_spec

  def launch_job(self,
                 job_id: Text,
                 project: Text,
                 training_input: Dict[Text, Any],
                 job_labels: Optional[Dict[Text, Text]] = None) -> None:
    """Launches a long-running job.

    Args:
      job_id: The job ID of the AI Platform training job.
      project: The GCP project under which the training job will be executed.
      training_input: Training input argument for AI Platform training job. See
        https://cloud.google.com/ml-engine/reference/rest/v1/projects.jobs#TrainingInput
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.
    """
    parent = 'projects/{}'.format(project)
    job_spec = self._create_job_spec(job_id, training_input, job_labels)

    # Submit job to AIP Training
    logging.info('TrainingInput=%s', training_input)
    logging.info('Submitting job=\'%s\', project=\'%s\' to AI Platform.',
                 job_id, parent)
    request = self._client.projects().jobs().create(
        body=job_spec, parent=parent)
    # Record the fully-qualified job name before executing the request so
    # later get_job() calls can reference it.
    self._job_name = '{}/jobs/{}'.format(parent, job_id)
    request.execute()

  def get_job(self) -> Dict[Text, Text]:
    """Gets the long-running job."""
    request = self._client.projects().jobs().get(name=self._job_name)
    return request.execute()

  def get_job_state(self, response) -> Text:
    """Gets the state of the long-running job.

    Args:
      response: The response from get_job

    Returns:
      The job state.
    """
    return response['state']
class UCAIPJobClient(AbstractJobClient):
  """Class for interacting with uCAIP CustomJob."""

  # Terminal job states from the uCAIP JobState enum.
  JOB_STATES_COMPLETED = (_UCAIP_JOB_STATE_SUCCEEDED, _UCAIP_JOB_STATE_FAILED,
                          _UCAIP_JOB_STATE_CANCELLED)
  JOB_STATES_FAILED = (_UCAIP_JOB_STATE_FAILED, _UCAIP_JOB_STATE_CANCELLED)

  def __init__(self, ucaip_region: Text):
    # The region is required to build the regional API endpoint in
    # create_client(), so validate it before the base __init__ runs.
    if ucaip_region is None:
      raise ValueError('Please specify a region for uCAIP training.')
    self._region = ucaip_region
    super().__init__()

  def create_client(self) -> None:
    """Creates the Gapic job client.

    Can also be used for recreating the job client (e.g. in the case of
    communication failure).

    Multiple job requests can be done in parallel if needed, by creating an
    instance of the class for each job. Note that one class instance should
    only be used for one job, as each instance stores variables (e.g. job_id)
    specific to each job.
    """
    self._client = gapic.JobServiceClient(
        client_options=dict(api_endpoint=self._region + _UCAIP_ENDPOINT_SUFFIX))

  def create_training_args(self, input_dict: Dict[Text, List[types.Artifact]],
                           output_dict: Dict[Text, List[types.Artifact]],
                           exec_properties: Dict[Text, Any],
                           executor_class_path: Text,
                           training_inputs: Dict[Text, Any],
                           job_id: Optional[Text]) -> Dict[Text, Any]:
    """Get training args for runner._launch_aip_training.

    The training args contain the inputs/outputs/exec_properties to the
    tfx.scripts.run_executor module.

    Args:
      input_dict: Passthrough input dict for tfx.components.Trainer.executor.
      output_dict: Passthrough input dict for tfx.components.Trainer.executor.
      exec_properties: Passthrough input dict for
        tfx.components.Trainer.executor.
      executor_class_path: class path for TFX core default trainer.
      training_inputs: Spec for CustomJob for AI Platform (Unified) custom
        training job. See
        https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1/CustomJobSpec
        for the detailed schema.
      job_id: Display name for AI Platform (Unified) custom training job. If not
        supplied, system-determined unique ID is given. Refer to
        https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1/projects.locations.customJobs

    Returns:
      A dict containing the training arguments
    """
    # Copy so the caller's dict is not mutated by the pops/inserts below.
    training_inputs = training_inputs.copy()

    json_inputs = artifact_utils.jsonify_artifact_dict(input_dict)
    logging.info('json_inputs=\'%s\'.', json_inputs)
    json_outputs = artifact_utils.jsonify_artifact_dict(output_dict)
    logging.info('json_outputs=\'%s\'.', json_outputs)
    json_exec_properties = json.dumps(exec_properties, sort_keys=True)
    logging.info('json_exec_properties=\'%s\'.', json_exec_properties)

    # We use custom containers to launch training on AI Platform (unified),
    # which invokes the specified image using the container's entrypoint. The
    # default entrypoint for TFX containers is to call scripts/run_executor.py.
    # The arguments below are passed to this run_executor entry to run the
    # executor specified in `executor_class_path`.
    container_command = _CONTAINER_COMMAND + [
        '--executor_class_path',
        executor_class_path,
        '--inputs',
        json_inputs,
        '--outputs',
        json_outputs,
        '--exec-properties',
        json_exec_properties,
    ]

    if not training_inputs.get('worker_pool_specs'):
      training_inputs['worker_pool_specs'] = [{}]

    # Apply the default image and our entrypoint to every worker pool.
    for worker_pool_spec in training_inputs['worker_pool_specs']:
      if not worker_pool_spec.get('container_spec'):
        worker_pool_spec['container_spec'] = {
            'image_uri': _TFX_IMAGE,
        }

      # Always use our own entrypoint instead of relying on container default.
      if 'command' in worker_pool_spec['container_spec']:
        logging.warn('Overriding custom value of container_spec.command')
      worker_pool_spec['container_spec']['command'] = container_command

    # Pop project_id so AIP doesn't complain about an unexpected parameter.
    # It's been a stowaway in aip_args and has finally reached its destination.
    project = training_inputs.pop('project')

    with telemetry_utils.scoped_labels(
        {telemetry_utils.LABEL_TFX_EXECUTOR: executor_class_path}):
      job_labels = telemetry_utils.get_labels_dict()

    # 'tfx_YYYYmmddHHMMSS' is the default job display name if not explicitly
    # specified.
    job_id = job_id or 'tfx_{}'.format(
        datetime.datetime.now().strftime('%Y%m%d%H%M%S'))

    training_args = {
        'job_id': job_id,
        'project': project,
        'training_input': training_inputs,
        'job_labels': job_labels
    }

    return training_args

  def _create_job_spec(
      self,
      job_id: Text,
      training_input: Dict[Text, Any],
      job_labels: Optional[Dict[Text, Text]] = None) -> Dict[Text, Any]:
    """Creates the job spec.

    Args:
      job_id: The display name of the AI Platform (Unified) custom training job.
      training_input: Spec for CustomJob for AI Platform (Unified) custom
        training job. See
        https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1/CustomJobSpec
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.

    Returns:
      The CustomJob. See
      https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1/projects.locations.customJobs
    """
    job_spec = {
        'display_name': job_id,
        'job_spec': training_input,
        'labels': job_labels,
    }
    return job_spec

  def launch_job(self,
                 job_id: Text,
                 project: Text,
                 training_input: Dict[Text, Any],
                 job_labels: Optional[Dict[Text, Text]] = None) -> None:
    """Launches a long-running job.

    Args:
      job_id: The display name of the AI Platform (Unified) custom training job.
      project: The GCP project under which the training job will be executed.
      training_input: Spec for CustomJob for AI Platform (Unified) custom
        training job. See
        https://cloud.google.com/ai-platform-unified/docs/reference/rest/v1/CustomJobSpec
        for the detailed schema.
      job_labels: The dict of labels that will be attached to this job.
    """
    parent = 'projects/{project}/locations/{location}'.format(
        project=project, location=self._region)
    job_spec = self._create_job_spec(job_id, training_input, job_labels)

    # Submit job to AIP Training
    logging.info('TrainingInput=%s', training_input)
    logging.info('Submitting custom job=\'%s\', project=\'%s\''
                 ' to AI Platform (Unified).', job_id, parent)
    response = self._client.create_custom_job(parent=parent,
                                              custom_job=job_spec)
    # The server assigns the canonical resource name used by get_job().
    self._job_name = response.name

  def get_job(self) -> CustomJob:
    """Gets the long-running job."""
    return self._client.get_custom_job(name=self._job_name)

  def get_job_state(self, response) -> JobState:
    """Gets the state of the long-running job.

    Args:
      response: The response from get_job

    Returns:
      The job state.
    """
    return response.state
def get_job_client(
    enable_ucaip: Optional[bool] = False,
    ucaip_region: Optional[Text] = None
) -> Union[CAIPJobClient, UCAIPJobClient]:
  """Selects the job client implementation.

  Args:
    enable_ucaip: If True, build a uCAIP client; otherwise a CAIP client.
    ucaip_region: Region for the uCAIP training endpoint (required by
      UCAIPJobClient when enable_ucaip is True).

  Returns:
    The corresponding job client instance.
  """
  if not enable_ucaip:
    return CAIPJobClient()
  return UCAIPJobClient(ucaip_region)
|
24,939 | bf1729949eb780177fc3055c196480e29b4efc3f | # Stubs for tensorflow.python.debug.wrappers.hooks (Python 3)
#
# NOTE: This dynamically typed stub was automatically generated by stubgen.
from tensorflow.core.protobuf import config_pb2 as config_pb2
from tensorflow.python.debug.lib import debug_utils as debug_utils, stepper as stepper
from tensorflow.python.debug.wrappers import dumping_wrapper as dumping_wrapper, framework as framework, grpc_wrapper as grpc_wrapper, local_cli_wrapper as local_cli_wrapper
from tensorflow.python.training import session_run_hook as session_run_hook
from typing import Any as Any, Optional as Optional
class LocalCLIDebugHook(session_run_hook.SessionRunHook):
    # Stubgen-generated signatures for the local-CLI debugger session hook.
    def __init__(self, ui_type: str = ..., dump_root: Optional[Any] = ..., thread_name_filter: Optional[Any] = ...) -> None: ...
    def add_tensor_filter(self, filter_name: Any, tensor_filter: Any) -> None: ...
    def begin(self) -> None: ...
    def before_run(self, run_context: Any): ...
    def after_run(self, run_context: Any, run_values: Any) -> None: ...
class DumpingDebugHook(session_run_hook.SessionRunHook):
    # Stubgen-generated signatures for the dumping debug session hook.
    def __init__(self, session_root: Any, watch_fn: Optional[Any] = ..., thread_name_filter: Optional[Any] = ..., log_usage: bool = ...) -> None: ...
    def begin(self) -> None: ...
    def before_run(self, run_context: Any): ...
    def after_run(self, run_context: Any, run_values: Any) -> None: ...
class GrpcDebugHook(session_run_hook.SessionRunHook):
    # Stubgen-generated signatures for the gRPC debug session hook.
    def __init__(self, grpc_debug_server_addresses: Any, watch_fn: Optional[Any] = ..., thread_name_filter: Optional[Any] = ..., log_usage: bool = ...) -> None: ...
    def before_run(self, run_context: Any): ...
class TensorBoardDebugHook(GrpcDebugHook):
    # Stubgen-generated signatures; extends GrpcDebugHook with the
    # send_traceback_and_source_code option.
    def __init__(self, grpc_debug_server_addresses: Any, thread_name_filter: Optional[Any] = ..., send_traceback_and_source_code: bool = ..., log_usage: bool = ...) -> None: ...
    def before_run(self, run_context: Any): ...
|
24,940 | eabdeadb6724fdab9d2273b1de9eafc80626a40b | import sqlite3,random
class DatabaseDriver:
    """Thin sqlite3 wrapper persisting (word, succesor, times) transition counts."""

    def __init__(self, name):
        """Open (or create) '<name>.db' and ensure the dataset table exists."""
        self.db = sqlite3.connect(f'{name}.db')
        cur = self.db.cursor()
        cur.execute("create table if not exists dataset (word STRING, succesor STRING, times INT);")
        self.db.commit()

    def insert_combination(self, word, succesor, times):
        """Record *times* occurrences of the (word, succesor) pair.

        If the pair already exists its counter is incremented instead of
        inserting a duplicate row.
        """
        cur = self.db.execute('select times from dataset where word=? and succesor=?', (word, succesor))
        res = cur.fetchall()
        if len(res) == 0:
            self.db.execute('insert into dataset values(?,?,?)', (word, succesor, times))
        else:
            times = times + int(res[0][0])
            self.db.execute('update dataset set times = ? where word=? and succesor=?', (times, word, succesor))
        self.db.commit()

    def get_word_info(self, word):
        """Return [(succesor, times), ...] rows recorded for *word*."""
        cur = self.db.execute('select succesor, times from dataset where word=?', (word,))
        return cur.fetchall()

    def clean_word(self, word):
        """Delete every row whose word column equals *word*."""
        self.db.execute('delete from dataset where word=?', (word,))
        # Bug fix: the delete was never committed, so it could be silently
        # lost when the connection closed without a later commit.
        self.db.commit()
class MarkovBot:
    """First-order Markov text generator backed by a DatabaseDriver."""

    def __init__(self, database):
        # Transition counts are persisted in '<database>.db'.
        self.db_driver = DatabaseDriver(database)

    def digest_text(self, text):
        """Record every adjacent word pair of *text* in the database."""
        words = text.split()
        for i in range(len(words) - 1):
            self.db_driver.insert_combination(words[i], words[i + 1], 1)

    def build_intervals(self, word_info):
        """Map [(succesor, times), ...] to cumulative probability intervals.

        Returns a list of (low, high, succesor) tuples that partition [0, 1)
        proportionally to each successor's observed count.
        """
        # Bug fix: removed a leftover debug print of the running total.
        total_elements = 0
        probs = 0
        intervals = []
        for _word, weight in word_info:
            total_elements += weight
        for word, weight in word_info:
            share = weight / total_elements
            intervals.append((probs, probs + share, word))
            probs += share
        return intervals

    def generate_text(self, begin_with, word_cap):
        """Extend *begin_with* by up to *word_cap* randomly sampled words.

        Generation stops early when the current word has no recorded
        successors.
        """
        text = begin_with
        current_word = text.split()[-1]
        word_info = self.db_driver.get_word_info(current_word)
        steps = 0
        while steps < word_cap and len(word_info) != 0:
            next_word = ""
            rand = random.random()
            # Pick the interval that contains the random draw.
            for low, high, successor in self.build_intervals(word_info):
                if low <= rand < high:
                    next_word = successor
                    break
            text = text + f" {next_word}"
            word_info = self.db_driver.get_word_info(next_word)
            steps += 1
        return text
# Ad-hoc manual check against the 'pepito' database: print the recorded
# successors of "la", their probability intervals, and a sampled sentence.
if __name__ == '__main__':
    a = MarkovBot("pepito")
    elem_info = a.db_driver.get_word_info("la")
    print(elem_info)
    print(a.build_intervals(elem_info))
    print(a.generate_text("la", 10))
24,941 | 77aedb8e0b548c90a2d64db9af13c2f898d6f382 | from bs4 import BeautifulSoup as bs
import urllib2
import pickle
import numpy as np
import scipy.io
import matplotlib.pyplot as plt
import random as rnd
from sklearn import svm
from sklearn.metrics import confusion_matrix
from sklearn import cross_validation
from sklearn.grid_search import GridSearchCV
from labels import *
import sys
def IngredientScraper2(fooddish):
    """Scrape allrecipes.com for every dish name in *fooddish*.

    Builds I (ingredient id -> ingredient name) and R (dish -> recipe name ->
    {ingredient id: gram fraction normalized to sum 1}) and pickles (I, R)
    to 'AllRecipesIngImageNet.npy'.  Python 2 only (print statement, urllib2,
    file()).
    """
    #dictionary for ingredients
    I = {}
    #dictionary for food recipes
    R = {}
    website = 'http://allrecipes.com'
    for food in fooddish:
        R[food] = {}
        #search for food
        print food
        #for page in xrange(2):
        resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
        results = bs(resultspage)
        # Each search hit links to a recipe page.
        for recipelinks in results.find_all('a',class_='title'):
            recipelink = recipelinks.get('href')
            #go to recipe page
            recipepage = urllib2.urlopen(website+recipelink)
            recipe = bs(recipepage)
            recipename = recipe.find('h1',id='itemTitle').text
            if recipename not in R[food]:
                #print "Recipe: ", recipename
                #ingredients for this recipe
                ingredients = recipe.find_all('li', id='liIngredient')
                R[food][recipename] = {}
                for ing in ingredients:
                    ingid = ing.attrs['data-ingredientid']
                    ingname = ing.find(id='lblIngName').text
                    if ingid not in I:
                        I[ingid] = ingname
                    amt=float(ing.attrs['data-grams'])
                    R[food][recipename][ingid] = amt
                #normalize values
                # NOTE(review): this divides by the gram total; a recipe with
                # no ingredient rows would raise ZeroDivisionError — confirm.
                m = sum(R[food][recipename].values())
                R[food][recipename]={ingid: R[food][recipename][ingid]/m for ingid in R[food][recipename].keys()}
    #Recipes = {}
    #ingsorted = sorted(I.keys())
    #for food in R.keys():
    ##m = sum(R[food].values())
    ##normalize values
    ##R[food] = {ingid: R[food][ingid]/m for ingid in R[food].keys()}
    #Recipes[food] = [0]*len(ingsorted)
    #for i in range(len(ingsorted)):
    ###if ingredient is in dish R[food]
    #if ingsorted[i] in R[food]:
    #Recipes[food][i] = R[food][ingsorted[i]]
    #m = sum(Recipes[food])
    #Recipes[food] = [x/m for x in Recipes[food]]
    pickle.dump((I,R),file('AllRecipesIngImageNet.npy','w'))
    #return I,R
#=================================================================================
# Ingredient Scraper with cooking terms and nutritional info
def IngredientScraper(fooddish):
    """Scrape allrecipes.com with cooking directions and nutrition info.

    Like IngredientScraper2, but each recipe maps to a 3-element list:
    [0] {ingredient id: normalized gram fraction}, [1] lower-cased direction
    strings, [2] seven nutrition-gradient values in [0, 1].  Pickles (I, R)
    to 'AllRecipesIng50FoodExtra.npy'.  Python 2 only.
    """
    #dictionary for ingredients
    I = {}
    #dictionary for food recipes
    R = {}
    website = 'http://allrecipes.com'
    for food in fooddish:
        R[food] = {}
        #search for food
        print food
        #for page in xrange(2):
        resultspage = urllib2.urlopen("http://allrecipes.com/search/default.aspx?qt=k&wt="+food)
        results = bs(resultspage)
        for recipelinks in results.find_all('a',class_='title'):
            recipelink = recipelinks.get('href')
            #go to recipe page
            recipepage = urllib2.urlopen(website+recipelink)
            recipe = bs(recipepage)
            recipename = recipe.find('h1',id='itemTitle').text
            if recipename not in R[food]:
                #print "Recipe: ", recipename
                #ingredients for this recipe
                ingredients = recipe.find_all('li', id='liIngredient')
                #list containing ingredients, cookingterms, nutritionrating
                R[food][recipename] = [{},[],[0]*7]
                for ing in ingredients:
                    ingid = ing.attrs['data-ingredientid']
                    ingname = ing.find(id='lblIngName').text
                    if ingid not in I:
                        I[ingid] = ingname
                    amt=float(ing.attrs['data-grams'])
                    R[food][recipename][0][ingid] = amt
                #normalize values
                m = sum(R[food][recipename][0].values())
                R[food][recipename][0]={ingid: R[food][recipename][0][ingid]/m for ingid in R[food][recipename][0].keys()}
                #get cooking terms
                directions = [step.text.lower() for step in recipe.find_all('span', class_='plaincharacterwrap break')]
                R[food][recipename][1] = directions
                #get nutrition
                # The gradient width is embedded in the style attribute; slice
                # off 'width:' and the trailing '%' then scale to [0, 1].
                nutritionrating = recipe.find_all('ul', id='ulNutrient')
                n = 0
                for nutrient in nutritionrating:
                    #category = nutrient.find('li',class_='categories').text
                    R[food][recipename][2][n]=float(nutrient.find('li',id='divNutrientGradient').attrs['style'][6:-1])/100
                    n += 1
    pickle.dump((I,R),file('AllRecipesIng50FoodExtra.npy','w'))
#================================================================================
#X = np.zeros((len(trainlabels),len(I.keys())-1),dtype=np.float32)
#ingsorted = sorted(I.keys())[1:]
#for i in xrange(len(trainlabels)):
##thresh = np.random.uniform(0,RecipeMax[trainlabels[i]],n)
#dish = fooddish[trainlabels[i]]
#X[i,:] = [1 if x != 0 else 0 for x in Recipes[dish][1:]]
##if len(R[dish].keys()) != 0:
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
##for j in xrange(len(ingsorted)):
##if ingsorted[j] in R[dish][recipe]:
###X[i,j] = R[dish][recipe][ingsorted[j]]
##X[i,j] = 1
###Recipes[food] = [0]*len(ingsorted)
###for i in range(len(ingsorted)):
####if ingredient is in dish R[food]
###if ingsorted[i] in R[food]:
###Recipes[food][i] = R[food][ingsorted[i]]
###X[i,:] = [1 if x>t else 0 for x,t in zip(Recipes[dish],thresh)]
###X[i,:] = Recipes[dish]
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,i]==1)
#print i, len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) == traindata.shape[0]:
#attr_labels[range(0,800,100)] = 0
#attributeclassifiers[i] = svm.SVC(kernel='linear',C=0.001)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())-1),dtype=np.float32)
#for i in xrange(len(testlabels)):
#print 'test case:', i
#Xtest[i,:] = [x.predict(testdata[i,:])[0] for x in attributeclassifiers]
#pickle.dump((X,Xtest),file('vlg_extractor_1.1.2/ImageNetSurveyMC/IngredientAttributes.npy','w'))
###fill out correlation matrix
#m = traindata.shape[1] #number of visual word
#n = len(I.keys()) #number of ingredients
#corr_mat = np.zeros((m,n))
#for i in xrange(len(trainlabels)):
#for visualword in xrange(m):
#if traindata[i,visualword] != 0:
##count co-occurrence of ingredient and visual word
##binaryIng = [1 if x!=0 else 0 for x in Recipes[fooddish[trainlabels[i]]]]
#corr_mat[visualword,:] = corr_mat[visualword,:] + X[i,:]
#pickle.dump(corr_mat,file('corr_mat50Food.npy','w'))
###traindata = np.concatenate((traindata,X),1)
##corr_mat = pickle.load(file('corr_mat.npy','r'))
###normalize corr_mat
#row_sums = corr_mat.sum(axis=1)
#row_sums = np.array([1 if x==0 else x for x in row_sums])
#corr_mat = corr_mat/row_sums[:,np.newaxis]
##avg = corr_mat.mean(axis=0)
#logcormat = np.log(corr_mat+1)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#x = np.dot(testdata[i,:],logcormat)
#Xtest[i,:] = x/sum(x)
##dish = fooddish[testlabels[i]]
###randomly pick recipe
##recipe = rnd.choice(R[dish].keys())
##print recipe
#for j in xrange(len(ingsorted)):
#if attributeclassifiers[j] is not None:
#Xtest[i,j]=attributeclassifiers[j].predict(testdata[i,:])
##if ingsorted[j] in R[dish][recipe]:
##Xtest[i,j] = 1
##Xtest[i,:] = [1 if xt>t else 0 for xt,t in zip(x,avg)]
#fig = plt.figure()
#ax = fig.add_subplot(5,2,10)
#count = [0]*len(ingsorted)
#for i in xrange(len(ingsorted)):
##find negative examples of attribute i
#pos_idx = np.where(X[np.where(trainlabels==9)[0],i]==1)
#count[i] = len(pos_idx[0])
#r = plt.bar(range(589),count)
#ax.set_xticks([])
#plt.xlabel(fooddish[9])
##ax = fig.add_subplot(522)
##r = plt.bar(range(440),Recipes['casserole'])
##ax.set_xticks([])
##plt.xlabel('casserole')
##ax = fig.add_subplot(523)
##r = plt.bar(range(440),Recipes['deviled%eggs'])
##ax.set_xticks([])
##plt.xlabel('deviledegg')
##ax = fig.add_subplot(524)
##r = plt.bar(range(440),Recipes['fried%rice'])
##ax.set_xticks([])
##plt.xlabel('friedrice')
##ax = fig.add_subplot(525)
##r = plt.bar(range(440),Recipes['kebab'])
##ax.set_xticks([])
##plt.xlabel('kebab')
##ax = fig.add_subplot(526)
##r = plt.bar(range(440),Recipes['samosa'])
##ax.set_xticks([])
##plt.xlabel('samosa')
##ax = fig.add_subplot(527)
##r = plt.bar(range(440),Recipes['pasta%salad'])
##ax.set_xticks([])
##plt.xlabel('pastasalad')
##ax = fig.add_subplot(528)
##r = plt.bar(range(440),Recipes['paella'])
##ax.set_xticks([])
##plt.xlabel('Paella')
##ax = fig.add_subplot(529)
##r = plt.bar(range(440),Recipes['spaghetti'])
##ax.set_xticks([])
##plt.xlabel('spaghetti')
##ax = fig.add_subplot(5,2,10)
##r = plt.bar(range(440),Recipes['roulade'])
##ax.set_xticks([])
##plt.xlabel('roulade')
#============== script to get top features ============================
#from sklearn.multiclass import OneVsRestClassifier
#import random as rnd
#recipedict='AllRecipesIng.npy'
#fooddish = fooddish[0]
#dataset = 'vlg_extractor/ImageNetSurveyMC/ImageNetSurveyMC'
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float32)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float32)
#testlabels = var['testlabels'].flatten()
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.uint8)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#k=5
##split training data into k-folds
#kfold = cross_validation.StratifiedKFold(trainlabels,k)
#param_grid = [
#{'estimator__C': [0.001, 0.01, 1, 10, 100], 'estimator__kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#svc = OneVsRestClassifier(svm.SVC(kernel='linear',C=1))
#svc.fit(X,trainlabels)
##clf = GridSearchCV(estimator=svc, param_grid=param_grid, cv=kfold, n_jobs=-1)
##clf.fit(np.concatenate((traindata,X),1),trainlabels)
#svm_weights = svc.coef_
#topfeatures = [None]*svm_weights.shape[0] #topfeatures for each class
#for i in xrange(svm_weights.shape[0]):
#featureIdx=np.argsort(abs(svm_weights[i,:]))
#topfeatures[i] = featureIdx[::-1][0:30] #get top 30
##allfeatures = sorted(list(set().union(*topfeatures)))
###print top features for each class
#for f in xrange(len(fooddish)):
#xlabels = [None]*30
#for ingIdx in xrange(30):
#print fooddish[f], I[ingsorted[topfeatures[f][ingIdx]]], svm_weights[f,topfeatures[f][ingIdx]]
#xlabels[ingIdx] = I[ingsorted[topfeatures[f][ingIdx]]]
#fig=plt.figure()
#ax = fig.add_subplot(111)
#r = plt.bar(range(30),svm_weights[f,topfeatures[f]],color='b')
#ax.set_xticks(np.arange(30)+0.5)
#ax.set_xticklabels(xlabels,rotation=90,fontsize=8)
#ax.set_title(fooddish[f])
#ax.set_ylabel('Feature Weights')
#plt.show()
#=============================END ==================================
##train classifier for each ingredient attribute
#attributeclassifiers = [None]*len(allfeatures)
#param_grid = [
#{'C': [0.001, 0.01, 1, 10, 100], 'kernel': ['linear']},
##{'estimator__C': [1, 10, 100, 1000], 'estimator__gamma': [0.01, 0.001, 0.0001], 'estimator__kernel': ['rbf']},
#]
#for i in xrange(len(allfeatures)):
##find positive examples of attribute i
#pos_idx = np.where(X[:,allfeatures[i]]==1)
#print I[ingsorted[allfeatures[i]]], len(pos_idx[0])
#attr_labels = np.zeros((len(trainlabels),),dtype=np.uint8)
#attr_labels[pos_idx[0]] = 1
##train classifier
#if len(pos_idx[0]) != 0:
#attributeclassifiers[i] = GridSearchCV(estimator=svm.SVC(), param_grid=param_grid, cv=kfold, n_jobs=-1)
#attributeclassifiers[i].fit(traindata,attr_labels)
#Xtest = np.zeros((len(testlabels),len(I.keys())),dtype=np.float32)
#for i in xrange(len(testlabels)):
#for j in xrange(len(allfeatures)):
#Xtest[i,allfeatures[j]]=attributeclassifiers[j].predict(testdata[i,:])[0]
#fig = plt.figure()
#ax = fig.add_subplot(111)
#res = ax.imshow(X,cmap=plt.cm.bone,interpolation='nearest',aspect='auto')
#cb = fig.colorbar(res)
#plt.show()
##testdata = np.concatenate((testdata,Xtest),1)
#==============script to output data for use with cygwin MKL ============
#dataset = "vlg_extractor/ImageNetSurveyPicodes2048/ImageNetSurveyPicodes2048"
#dataset = "BoW2/ImageNet/ImageNetBoW2"
#recipedict = recipeDict[0] #change this
#fooddish = fooddish[0] #change this
#var=scipy.io.loadmat(dataset)
#traindata = np.ndarray.astype(var['X'],dtype=np.float)
#trainlabels = np.ndarray.astype(var['trainlabels'].flatten(),dtype=np.int)
#testdata = np.ndarray.astype(var['Xtest'],dtype=np.float)
#testlabels = var['testlabels'].flatten()
#images = var['testimages'][0]
#Xtest = pickle.load(file("/".join(dataset.split('/')[0:2])+'/IngredientAttributes.npy','r'))
#I,R = pickle.load(file(recipedict,'r'))
#ingsorted = sorted(I.keys())[1:]
#X = np.zeros((len(trainlabels),len(ingsorted)),dtype=np.int)
#for i in xrange(len(trainlabels)):
#dish = fooddish[trainlabels[i]]
#if len(R[dish].keys()) != 0:
####randomly pick recipe
#recipe = rnd.choice(R[dish].keys())
##print recipe
#X[i,:] = [1 if ing in R[dish][recipe] else 0 for ing in ingsorted]
#np.savez_compressed(dataset+"-MKL",traindata=traindata, testdata=testdata, X=X,
# Xtest=Xtest, trainlabels=trainlabels,testlabels=testlabels)
#pred=np.load(dataset+"-MKL_predictions.npz")
#y_true = pred['y_true']
#y_pred = pred['y_pred']
#from sklearn.metrics import classification_report
#print classification_report(y_true,y_pred)
#================= SCRIPT TO FIND POPULAR INGREDIENTS ====================
#ingredient histogram
#IngHist = {}
#for food in fooddish:
#IngHist[food] = {}
#for recipe in R[food].keys():
#for ingredient in R[food][recipe].keys():
#if ingredient not in IngHist[food]:
#IngHist[food][ingredient] = 1
#else:
#IngHist[food][ingredient] += 1
#commonIngredients = [None]*len(fooddish)
#commonIngredientsIdx = []
#for f in xrange(len(fooddish)):
#commonIngredientsIdx.extend([ingsorted.index(x) for x in IngHist[fooddish[f]].keys() if IngHist[fooddish[f]][x] >= 2 and x != '0'])
#commonIngredientsIdx = sorted(set(commonIngredientsIdx))
#pickle.dump(commonIngredientsIdx,file('CommonIngredientsImageNet.npy','w'))
#fig = plt.figure()
#i=9
#ax = fig.add_subplot(1,1,1)
#r = plt.bar(np.arange(len(IngHist[fooddish[i]].keys())),IngHist[fooddish[i]].values())
#ax.set_xticks(np.arange(len(IngHist[fooddish[i]].keys()))+0.5)
#ax.set_xticklabels([I[x] for x in IngHist[fooddish[i]]],rotation=90,fontsize=8)
#ax.set_title(fooddish[i])
#ax.set_ylabel('Ingredient Count')
#plt.show()
if __name__=="__main__":
    # CLI entry point: scrape recipes/ingredients for the dish selected by
    # the integer index passed as the first command-line argument.
    IngredientScraper(fooddish[int(sys.argv[1])])
|
24,942 | ba2beb504c2465b81f7bf80b753c28164027d123 | import csv
import sys
input_file = sys.argv[1]
output_file = sys.argv[2]
results = []
with open(input_file, 'rb') as csvfile:
reader = csv.DictReader(csvfile)
for row in reader:
new_row = {}
if row['Billing Amount'][0] == '(':
continue
new_row['Description'] = row['Merchant'][:20]
new_row['Trans. Date'] = row['Transaction Date']
new_row['Post Date'] = row['Posting Date']
new_row['Amount'] = row['Billing Amount'][1:]
results.append(new_row)
with open(output_file, 'w') as csvfile:
fieldnames = ['Post Date', 'Trans. Date', 'Amount', 'Description']
writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
writer.writeheader()
for row in results:
print(row)
writer.writerow(row)
print("done")
|
24,943 | 9cb82f791f5017c86b59ddcca76bbb0c7fce4754 | from selenium.webdriver import ActionChains
from utils.locators import *
from utils.users import Users
class ProductPage():
    """Page object for a single product page."""

    def __init__(self, driver):
        # Keep the webdriver handle and cache the add-to-basket locator
        # so callers can still reach both as attributes.
        self.driver = driver
        self.addToBasketButton = Locators.addToBasketButton

    def addToBasket(self):
        """Click the add-to-basket button on the current product page."""
        self.driver.find_element(*self.addToBasketButton).click()
24,944 | 6ba1a44443821c666c6126c6469c24a6cf0cfe73 | import numpy as np
import pandas as pd
def generate_random_array(low=0, high=100, shape=(3, 6, 5)):
    """Return an array of random integers drawn from [low, high).

    The defaults preserve the original behavior: a 3x6x5 array of
    integers in [0, 100).  Parameters were added (backward-compatibly)
    so the bounds and shape are no longer hard-coded.
    """
    return np.random.randint(low, high, shape)
generate_random_array() |
24,945 | 1254714fc6a3444a3b2f653c9a41bf8dfcbdfb08 | """pre-commit hook for checking mccabe complexity"""
__version__ = '0.13.0'
|
24,946 | 4a1ed8389727d7d9c160704a00357a97500d55f5 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Google Code Jam boilerplate: read the whole input file up front, pop the
# number of cases, and accumulate all answers in `result` before writing them.
FILE_NAME = 'C-small-attempt0'
IN_FILE = FILE_NAME + '.in'
OUT_FILE = FILE_NAME + '.out'
f = open(IN_FILE, 'r')
src = f.read().split('\n')  # one list entry per input line
f.close()
case_num = int(src.pop(0))  # the first line holds the number of test cases
case_count = 0
result = ''  # accumulated answer text, written to OUT_FILE at the end
def recycle(num):
    """Return the set of distinct digit-rotations of num, excluding num itself.

    Rotations with leading zeros collapse to smaller integers, exactly as
    int() parses them.
    """
    digits = str(num)
    rotations = set()
    for shift in range(1, len(digits)):
        rotated = int(digits[shift:] + digits[:shift])
        if rotated != num:
            rotations.add(rotated)
    return rotations
# NOTE: Python 2 syntax (bare `print` statements) — run with a Py2 interpreter.
print recycle(12)  # sanity check: rotations of 12 -> set([21])
print('############ start #################')
for case_count in range(case_num):
    # Each case line holds two range bounds "A B".
    s = src.pop(0).strip().split(' ')
    a, b = [int(score) for score in s]
    nums = set(range(a, b))
    count = 0
    print "start: %d - %d" % (a, b)
    while len(nums):
        n = nums.pop()
        rec = recycle(n)
        # keep only rotations that fall inside [a, b]
        rec = set(filter(lambda x: a <= x and x <= b, rec))
        if len(rec):
            print "found: %d => %s" % (n, rec)
            # count every unordered pair among {n} | rec: (k+1)k/2 pairs
            count += (len(rec) + 1) * len(rec) / 2
            nums -= rec  # don't revisit members of this rotation group
    ans = 'Case #%d: %d\n' % (case_count + 1, count)
    print ans
    result += ans
print('############ end ###################')
f = open(OUT_FILE, 'w')
f.write(result)
f.close()
24,947 | 5f6f194ae3b958ab9fc904c4fe35dd57c0d0dda0 | #!/usr/bin/python
# -*- coding: latin-1 -*-
"""
Created on 08/03/2021
@author: Jonathan Servia Mandome
"""
from parts.word.elements.pagenumber import PageNumber
from shape import AlternateContent, TextBox
def LineaToDc(linea, columnas):
    """Map column names to values taken from one record.

    ``columnas`` maps each key to its position in the sequence ``linea``.
    The original implementation used ``eval(repr(value))`` per value, which
    is a no-op round-trip for plain values and unsafe/fragile in general,
    so the value is now taken directly.
    """
    return {key: linea[posicion] for key, posicion in columnas.items()}
def CertificadoSAT(partes, args):
    """Build the SAT certificate .docx through the in-house `document` API.

    NOTE(review): tied to hard-coded local Windows paths and the pyword
    project API; ``partes``/``args`` are currently unused in the visible
    body — confirm against callers.  Returns the Document object.
    """
    import document
    import os
    # import win32print,win32api
    path_temp = 'C:/Users/Jonathan/Desktop/pyword/'
    f = 'rtf.docx'
    _doc_word = document.Document(path_temp, f)
    _doc_word._debug = True
    _doc_word.empty_document()
    body = _doc_word.get_body()
    header = _doc_word.get_default_header()
    # Header table: fixed text on the left, automatic page number on the right.
    t = header.add_table([['1', document.new_page_number(header, 'Página ')]],
                         column_width=[5000, 5000], horizontal_alignment=['c', 'r'])
    # x = AlternateContent(header, text='ccccccccc')
    # Rotated text box anchored relative to column/page.
    r_position=[]
    r_position.append({'orientation':'horizontal', 'position':0, 'relative': 'column'})
    r_position.append({'orientation':'vertical', 'position':0, 'relative': 'page'})
    header.add_paragraph( TextBox(header, 'aaaaaaaaaaa', (500, 250), rotation=270, r_position=r_position,
                                  horizontal_alignment='c', font_format='b', font_size=12) )
    '''style = doc_word.get_part('style').get_doc_defaluts().get_rpr_defaults()
    style.get_values()['sz'] = 18
    style.get_values()['szCs'] = 18
    doc_word.get_part('style').get_doc_defaluts().set_rpr_defaults(style)'''
    # p.get_properties().set_pstyle('Principal')
    texto_antes = '002'
    txt_antes = open(path_temp + '%s.rtf' % texto_antes, 'r').read()
    # Label/placeholder pairs for the client-data table (tabla_1).
    lineas_1 = [
        ['Nombre', 'deno_ccl'],
        ['Dirección', 'dir_ccl'],
        ['Población', 'pob_ccl'],
        ['E-mail', 'email_ccl'],
        ['REPRESENTANTE RESPONSABLE'],
        ['Nombre y apellidos', 'contacto_ccl'],
        ['Cargo', 'cargo_ccl']
    ]
    # rt = open('c:/users/jonathan/desktop/002.rtf','r').read()
    cw = ['25%', '75%']
    # Shared paragraph formatting used for the company-data cells below.
    def Formatea(paragraph):
        paragraph.set_spacing({'after': '60', 'before': '60', 'line': '160'})
    paragraph_d = _doc_word.Image(header, 'c:/users/jonathan/desktop/out/iso.jpg', 2000, 1300)
    paragraph_i = _doc_word.Image(header, 'c:/users/jonathan/desktop/out/logo.png', 1800, 1200)
    # First company-data block (placed inside the header table between logos).
    datos_intec = _doc_word.Table(header, column_width=['100%'])
    datos_intec.get_properties().set_cell_margin({'start': {'w': 50}})
    nombre_empresa = 'P_NME'
    par, num_row = datos_intec.add_paragraph(nombre_empresa, horizontal_alignment='l', font_format='b')
    Formatea(par)
    data = 'P_DOM'
    _cell_ = datos_intec.get_row(num_row).get_cell(0)
    par1 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par1)
    data = 'P_CDP' + ' ' + 'P_POB'
    par2 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par2)
    data = 'P_RI'
    par3 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par3)
    data = 'Tel: %s e-mail:%s' % ('P_TEL', 'P_MAIL')
    par4 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par4)
    datos_intec.set_foreground_colour('885500')
    table = header.add_table([[paragraph_i, datos_intec, paragraph_d]], column_width=[2000, 6000, 2000],
                             horizontal_alignment=['l', 'l', 'r'])
    table.get_row(0).get_cell(0).get_properties().set_vertical_alignment('center')
    def FormateaTitulo(paragraph):
        paragraph.set_font_format('b')
        paragraph.set_font_size(14)
        paragraph.set_spacing({'before': '180', 'after': '80'})
    ##
    # Second Formatea shadows the first (same body); kept as in the original.
    def Formatea(paragraph):
        paragraph.set_spacing({'after': '60', 'before': '60', 'line': '160'})
    # Second company-data table (not attached to any container here).
    datos_intec = _doc_word.Table(header, column_width=[6000])
    nombre_empresa = 'P_NME'
    par, num_row = datos_intec.add_paragraph(nombre_empresa, horizontal_alignment='l', font_format='b')
    Formatea(par)
    data = 'P_DOM'
    _cell_ = datos_intec.get_row(num_row).get_cell(0)
    par1 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par1)
    data = 'P_CDP' + ' P_POB'
    par2 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par2)
    data = 'P_RI'
    par3 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par3)
    data = 'Tel: %s e-mail:%s' % ('P_TEL', 'P_MAIL')
    par4 = _cell_.add_paragraph(data, horizontal_alignment='l', font_format='b')
    Formatea(par4)
    # range(0) is empty — this loop intentionally adds no spacer paragraphs.
    for i in range(0):
        body.add_paragraph('')
    p = body.add_paragraph('drmdrm')
    p.get_properties().set_keep_next(True)
    p.get_properties().set_pstyle('Descripcin')
    tabla_1 = body.add_table(lineas_1, column_width=cw, borders={'all': {'sz': 4}})
    tabla_1.get_properties().set_cell_margin({'start': {'w': 50}})
    tabla_1.get_properties().set_table_caption('titulo')
    tabla_1.set_spacing({'after': '80', 'before': '80', 'line': '180'})
    # Row 4 ('REPRESENTANTE RESPONSABLE') spans both columns.
    tabla_1.get_row(4).get_cell(0).get_properties().set_grid_span(2)
    tabla_1.get_row(4).get_cell(0).get_properties().set_table_cell_width('100%')
    for row in tabla_1.get_rows():
        for cell in row.get_cells():
            cell.get_properties().set_vertical_alignment('center')
    # tabla_1.get_row(4).get_cell(0).add_rtf(rt)
    # NOTE(review): this double loop repeats the vertical-alignment pass above.
    for row in tabla_1.get_rows():
        for cell in row.get_cells():
            cell.get_properties().set_vertical_alignment('center')
    '''t = body.add_table([[None, '2'], ['t1', 't2'], borders={'all': {'sz': 4}})
    t.get_row(1).get_cell(0).add_rtf(txt_antes)
    t.set_foreground_colour('AA2233')'''
    body.add_paragraph('')
    body.add_paragraph('3333').SetFormatList()
    body.add_paragraph('3333').SetFormatList()
    body.add_paragraph('3333').SetFormatList('2', '1')
    body.add_paragraph('999').SetFormatList('2')
    body.add_paragraph('999').SetFormatList()
    body.add_paragraph('3333').SetFormatList()
    _doc_word.Save()
    # Open the generated file with the default Windows handler.
    os.system('start ' + path_temp + f)
    return _doc_word
if __name__ == '__main__':
    # Standalone demo: build a document with a rotated TextBox whose text
    # contains 'NAME/row/col' variable references.
    import document
    import os
    partes = ['0026085']
    args = {}
    #_doc_word = CertificadoSAT(partes, args)
    path_temp = 'C:/Users/Jonathan/Desktop/pyword/'
    f = 'rtf.docx'
    _doc_word = document.Document(path_temp, f)
    _doc_word._debug = True
    # Presumably these values are substituted for the CL_DENO/row/col
    # references in the TextBox text below — TODO confirm in document API.
    _doc_word.set_variables({'CL_DENO': [['Pedro','Marta', 'Maria'], ['PedroL','MartaL', 'MariaL']]})
    _doc_word.empty_document()
    body = _doc_word.get_body()
    header = _doc_word.get_default_header()
    r_position=[]
    r_position.append({'orientation': 'horizontal', 'position': -5000, 'relative': 'column'})
    r_position.append({'orientation': 'vertical', 'position': 6000, 'relative': 'paragraph'})
    header.add_paragraph(TextBox(header, 'CL_DENO/1/2 23232 CL_DENO/0/1', (50, 5000), (8000, 500), rotation=270, r_position=r_position,
                                 background_color='FFEFFF', horizontal_alignment='c', font_format='b', font_size=12))
    _doc_word.save()
    # Open the generated file with the default Windows handler.
    os.system('start ' + path_temp + f)
|
24,948 | 4e0266381a9713c8b1d5439dec0acbbfa1cd51b3 | # -*- coding: utf-8 -*-
from django.db import models
class Resource(models.Model):
    """Django model for a resource record (table ``resource``)."""
    # Externally assigned string primary key (no auto-increment id).
    id = models.CharField(max_length=255, primary_key=True)
    name = models.CharField(max_length=255)
    # Icon reference; optional (may be blank).
    icon = models.CharField(max_length=255, blank=True)
    num = models.IntegerField(default=0)

    # Python 2 style string representation (Django <2 era).
    def __unicode__(self):
        return self.name

    class Meta:
        db_table = 'resource'
        verbose_name = "资源"
        verbose_name_plural = "资源"
24,949 | 41b4df75f138b0ae611385419fb4b5caf78bc2cf | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated migration: alter ``Question.caption`` (max_length=100;
    verbose_name is the escaped Russian word for "Title")."""

    dependencies = [
        ('core', '0003_answersdistribution_dm_count'),
    ]

    operations = [
        migrations.AlterField(
            model_name='question',
            name='caption',
            field=models.CharField(max_length=100, verbose_name='\u041d\u0430\u0437\u0432\u0430\u043d\u0438\u0435'),
        ),
    ]
|
24,950 | 8eced0eb25e53bc0afce564758a32e9fae0ca4ee | def setup(): # runs only once
size(400,400) # the size of the canvas
stroke(255) #
def draw(): # Processing callback: executes continuously, once per frame
    line(150, 25, mouseX, mouseY) # draw a line from a fixed point to the mouse
    saveFrame("output.png") # saves the latest frame in the canvas
    # saveFrame("output-###.png") # saves every frame in the canvas
def mousePressed(): # Processing callback: fires when a mouse button is pressed
    background(192, 64, 0) # clear the canvas with color (r=192, g=64, b=0)
|
24,951 | 0f692cc9695469c0d7143f75202fd02b94284cbd | k, x = map(int, input().split())
l = list(map(int, input().split()))
hl = [i // 2 for i in l]  # halves of the input values (semi-common multiple problem)
from fractions import gcd
from functools import reduce
def lcm(a, b):
    """Return the least common multiple of a and b."""
    # fractions.gcd was removed in Python 3.9; math.gcd is the supported API.
    from math import gcd as _gcd
    return a * b // _gcd(a, b)
def lcm_list(l):
    """Fold lcm over the list, starting from 1 (so lcm_list([]) == 1)."""
    acc = 1
    for value in l:
        acc = lcm(acc, value)
    return acc
a = lcm_list(hl)  # LCM of all halved values
i = hl[0]
cnt = 0
# cnt = number of factors of 2 in the first halved value
while i % 2 == 0:
    cnt += 1
    i //= 2
flag = True
# flag stays True only if every halved value has exactly `cnt` factors of 2
for i in hl:
    if i % (2 ** cnt) == 0 and i % (2 ** (cnt + 1)) != 0:
        continue
    else:
        flag = False
        break
if flag:
    # count the odd multiples of `a` that do not exceed x: a, 3a, 5a, ...
    print((x - a) // (a * 2) + 1)
else:
    print(0)
|
24,952 | 745d2037fd296be168cae73e3536161a66b87348 | import numpy as np
def dotProduct(vector1, vector2):
    """Return the dot product of two vectors (both flattened first).

    Returns False when the flattened vectors differ in length, matching
    the original `bool(0)` error signalling.
    """
    vector1 = np.reshape(vector1, -1)
    vector2 = np.reshape(vector2, -1)
    if vector1.size != vector2.size:
        return False
    # Vectorized: replaces the original per-element Python loops with np.dot.
    return float(np.dot(vector1, vector2))
# Two vectors are at a right angle iff their inner product is zero.
def isRight(vector1, vector2):
    """Return True when vector1 and vector2 are perpendicular."""
    return bool(dotProduct(vector1, vector2) == int(0))
|
24,953 | 561397a7ef15a4c590a92c8e6532f2cc54f2ee49 | #!/usr/bin/python3
import sys
import csv
import math
import collections
import pickle
import copy
import functools
import os
import os.path
import shutil
import matplotlib.pyplot as plt
def save_figure(figure, filename):
    """Save *figure* as <filename>.pdf and <filename>.png, plus a pickled
    copy of the figure object so it can be reopened interactively later."""
    print("\tGenerating figure: {} ...".format(filename), end="")
    figure.savefig("{}.pdf".format(filename))
    figure.savefig("{}.png".format(filename))
    pickle.dump(figure, open("{}.fig.pickle".format(filename), 'wb'))
    print(" Done")
# Named default factory for defaultdict: pickle cannot serialize lambda
# functions, so a module-level function replaces `lambda: None`.
def return_none():
    return None
# Per-analyzer CSV column names: "new" is the column flagging whether the row
# carries a fresh sample for that analyzer, "data" holds its RTT value
# (scaled to ms by read_vpp_file).
magic_translator = { "vec": { "new" : "status_new",
                              "data" : "status_data"},
                     "single_ts" : { "new" : "single_ts_rtt_new",
                                     "data" : "single_ts_rtt"},
                     "all_ts" : { "new" : "all_ts_rtt_new",
                                  "data" : "all_ts_rtt"}
                     }
def read_vpp_file(path):
    """Parse a VPP RTT CSV file into a list of per-row defaultdicts.

    Timestamps are rebased to the first row's time and RTT values are
    converted from the file's unit to milliseconds (* 1000).  The first
    two data rows are skipped.  Each entry defaults missing keys to None
    (via return_none, kept picklable).
    """
    analyzer_names = ("vec", "single_ts", "all_ts")
    # os.system("sed -i $'s/\t//g' {}".format(path))
    vpp_data = list()
    ignore_count = 0
    base_time = None
    # `with` closes the file deterministically (the original leaked the handle).
    with open(path) as csvfile:
        reader = csv.DictReader(csvfile, skipinitialspace=True)
        #print(reader.fieldnames)
        for row in reader:
            if base_time is None:
                base_time = float(row["time"])
            # ignore the first two entries.
            if ignore_count < 2:
                ignore_count += 1
                continue
            vpp_entry = collections.defaultdict(return_none)
            vpp_entry['time'] = float(row['time']) - base_time
            vpp_entry['host'] = row["host"].strip()
            vpp_entry["sec_num"] = int(row["seq_num"])
            vpp_entry["total_state"] = int(row["total_state"])
            for analyzer in analyzer_names:
                vpp_entry[analyzer] = float(row[magic_translator[analyzer]["data"]]) * 1000
                # flag whether this row carried a fresh sample for the analyzer
                vpp_entry[analyzer + "_new"] = 1 if row[magic_translator[analyzer]["new"]] == '1' else 0
            vpp_data.append(vpp_entry)
    return vpp_data
def get_time_series(vpp_data, analyzer):
    """Return (times, rtts) for the rows where *analyzer* has a new sample."""
    times = []
    rtts = []
    new_flag = analyzer + "_new"
    for entry in vpp_data:
        if entry[new_flag]:
            times.append(entry['time'])
            rtts.append(entry[analyzer])
    return times, rtts
def moving_minimum_filter(time_in, rtt_in):
    """Adaptive moving-minimum filter for RTT samples.

    Each output value is the minimum of the raw samples inside a window
    ending at the current sample, whose width is the previous RTT estimate.
    The first raw sample seeds the output.
    """
    smoothed = [rtt_in[0]]
    window_times = collections.deque([time_in[0]])
    window_rtts = collections.deque([rtt_in[0]])
    for idx in range(1, len(rtt_in)):
        window_times.append(time_in[idx])
        window_rtts.append(rtt_in[idx])
        # The window adapts: drop samples older than one estimated RTT
        # (the latest estimate) before the current sample's timestamp.
        window_start = time_in[idx] - smoothed[-1]
        while window_times[0] < window_start:
            window_times.popleft()
            window_rtts.popleft()
        smoothed.append(min(window_rtts))
    return smoothed
def hack_moving_min_into_vpp_data(vpp_data, analyzer):
    """Compute the moving-minimum ("smooth") RTT for *analyzer* and splice it
    into vpp_data in place under '<analyzer>_smooth' / '<analyzer>_smooth_new'.
    """
    # first generate the smooth RTT data
    time_raw, rtt_raw = get_time_series(vpp_data, analyzer)
    rtt_smooth = moving_minimum_filter(time_raw, rtt_raw)
    time_smooth = time_raw
    # now insert it in to the VPP data structure
    vpp_data_cursor = 0
    smooth_data_cursor = 0
    # forward to the point where the analyzer first has data
    # insert zeros until then
    while vpp_data[vpp_data_cursor]['time'] < time_smooth[smooth_data_cursor]:
        vpp_data[vpp_data_cursor][analyzer + '_smooth'] = 0
        vpp_data[vpp_data_cursor][analyzer + '_smooth_new'] = 0
        # BUG FIX: the cursor was never advanced here, so this loop never
        # terminated whenever the first row lacked the analyzer's sample.
        vpp_data_cursor += 1
    # Now, for every measurement, forward
    while smooth_data_cursor < len(time_smooth):
        # insert the new measurement
        vpp_data[vpp_data_cursor][analyzer + '_smooth'] = rtt_smooth[smooth_data_cursor]
        vpp_data[vpp_data_cursor][analyzer + '_smooth_new'] = 1
        vpp_data_cursor += 1
        # advance vpp_data_cursor, until just before next measurement
        while (vpp_data_cursor < len(vpp_data)) and \
              smooth_data_cursor < len(rtt_smooth) - 1 and \
              (vpp_data[vpp_data_cursor]['time'] < time_smooth[smooth_data_cursor + 1]):
            vpp_data[vpp_data_cursor][analyzer + '_smooth'] = rtt_smooth[smooth_data_cursor]
            vpp_data[vpp_data_cursor][analyzer + '_smooth_new'] = 0
            vpp_data_cursor += 1
        smooth_data_cursor += 1
    # And, for the last measurement, keep placing it in the vpp_data
    while vpp_data_cursor < len(vpp_data):
        vpp_data[vpp_data_cursor][analyzer + '_smooth'] = rtt_smooth[-1]
        vpp_data[vpp_data_cursor][analyzer + '_smooth_new'] = 0
        vpp_data_cursor += 1
def make_ecdf_data(vpp_data, analyzerA, analyzerB, weighted = False, relative = False):
    """Build (error, cumulative-probability) points for an ECDF of the
    estimation error analyzerA - analyzerB.

    Each error value stays in effect until either analyzer produces a new
    sample.  With ``weighted`` the cumulative probability is weighted by
    how long each error value was current; otherwise each value counts
    equally.  With ``relative`` errors become percentages of analyzerA.
    Returns (None, None) when one analyzer never produced data.
    """
    errors = list()
    times = list()   # duration for which each error value was in effect
    time_cursor = None
    current_error = None
    A_ready = False
    B_ready = False
    for row in vpp_data:
        ## skip forward to point where there is
        ## data for both analyzers
        if (not A_ready) or (not B_ready):
            if row[analyzerA + "_new"]:
                A_ready = True
            if row[analyzerB + "_new"]:
                B_ready = True
            if A_ready and B_ready:
                current_error = row[analyzerA] - row[analyzerB]
                if relative and row[analyzerA] > 0:
                    current_error = current_error / row[analyzerA] * 100
                time_cursor = row['time']
            continue
        # From now on there is data for both analyzers.
        # If either one is updated, calculate error.
        if row[analyzerA + "_new"] or row[analyzerB + "_new"]:
            # record the info from the period that ended
            last_time_delta = row['time'] - time_cursor
            errors.append(current_error)
            times.append(last_time_delta)
            # start a new error period
            current_error = row[analyzerA] - row[analyzerB]
            if relative and row[analyzerA] > 0:
                current_error = current_error / row[analyzerA] * 100
            time_cursor = row['time']
    # end the last measurement period
    if time_cursor == None:
        print("One analyzer has not data, not generating ECDF")
        return None, None
    last_time_delta = row['time'] - time_cursor
    errors.append(current_error)
    times.append(last_time_delta)
    # sort the (error, duration) pairs by error value for the CDF
    zipped = list(zip(errors, times))
    zipped.sort()
    errors, times = list(zip(*zipped))
    if not weighted:
        cum_prob = [i/len(errors) for i in range(1, len(errors)+1)]
    else:
        # weight each error by the fraction of total time it was current
        cum_prob = list()
        running_total = 0
        total_time = sum(times)
        for i in range(len(errors)):
            delta = times[i]/total_time
            running_total += delta
            cum_prob.append(running_total)
    return errors, cum_prob
if __name__ == "__main__":
    # Plot raw RTT time series for all three analyzers, then an ECDF of the
    # all_ts-vs-vec error.  Input CSV path is the first CLI argument.
    vpp_data = read_vpp_file(sys.argv[1])
    #time = [x['time'] for x in vpp_data]
    vec_rtt = get_time_series(vpp_data, 'vec')
    single_ts_rtt = get_time_series(vpp_data, 'single_ts')
    all_ts_rtt = get_time_series(vpp_data, 'all_ts')
    #print(vec_rtt)
    ##
    ## Time series figure
    ##
    plt.plot(single_ts_rtt[0], single_ts_rtt[1], 'y.', label="single ts")
    plt.plot(all_ts_rtt[0], all_ts_rtt[1], 'b.', label="all ts ")
    plt.plot(vec_rtt[0], vec_rtt[1], 'r.', label="vec ")
    plt.legend()
    plt.xlabel("time [s]")
    plt.ylabel("rtt [ms]")
    # Fixed typo in the displayed annotation: "samp les" -> "samples".
    freq_text = "Number of samples:\nall ts: {}\nsingle ts: {}\nvec: {}"
    freq_text = freq_text.format(len(all_ts_rtt[0]),
                                 len(single_ts_rtt[0]),
                                 len(vec_rtt[0]))
    plt.text(0.3, 0.9, freq_text, horizontalalignment='left',
             verticalalignment='top', transform=plt.gca().transAxes)
    ##
    ## Ecdf
    ##
    plt.figure()
    x, y = make_ecdf_data(vpp_data, 'all_ts', 'vec')
    plt.plot(x,y)
    plt.xlabel("RTT error [ms]")
    plt.ylabel("ECDF")
    plt.ylim((0,1))
    plt.xlim((-20,20))
    plt.show()
|
24,954 | 1c0fa1d0af6a9ba5ea1e8e141121f24ac193399f | import traceback
def f():
    """First hop of the demo call chain f -> g -> h."""
    g()
def g():
    """Middle hop of the demo call chain f -> g -> h."""
    h()
def h():
    """Deepest frame: deliberately raises ZeroDivisionError."""
    1 / 0
def main():
    """Demonstrate limiting a traceback and extracting a single frame."""
    try:
        f()
    except Exception as e:
        # limit=2 keeps only the two outermost frames (main and f)
        print(traceback.format_exc(limit=2))
        print("----------------------------------------")
        # extract_tb returns FrameSummary objects; index 1 is f()'s frame
        sframe = traceback.extract_tb(e.__traceback__, limit=2)[1]
        print(f"{sframe.filename}:{sframe.lineno}")


main()
|
24,955 | 09fcffa64b09683af75d347b5b5f22ccc15a5c2f | from selenium.common.exceptions import StaleElementReferenceException, NoSuchElementException
import resources.UtilitySuite.ActionSuite as actions
from resources.PageFactory.DemoqaPageObject import HomePage
from resources.PageFactory.Locators import HomePageLocators
from resources.UtilitySuite.AppConfig import HomePageParameters
from resources.UtilitySuite.AppConfig import LeftPanelParameters
from resources.PageFactory.DemoqaPageObject import LeftPanelSection
from resources.PageFactory.Locators import LeftPanelLocators
from resources.PageFactory.DemoqaPageObject import BookStoreApplicationPage
from resources.UtilitySuite.AppConfig import BookPageParameters
from resources.PageFactory.Locators import BookStoreApplicationPageLocators
from resources.PageFactory.DemoqaPageObject import LoginPage
from resources.PageFactory.Locators import LoginPageLocators
from resources.UtilitySuite.AppConfig import UniversialParameters
from resources.PageFactory.DemoqaPageObject import ElementsPage
from resources.PageFactory.Locators import ElementsPageLocators
from resources.UtilitySuite.AppConfig import ElementsPageParameters
import pytest
import time
import os.path
# from os import path
# from re import search
# from selenium.webdriver.support.ui import WebDriverWait
# from selenium.webdriver.support import expected_conditions as EC
# from selenium.webdriver.common.by import By
class TestCase:
    def test_HomePageUI(self, record_property):
        """Home page shows the six expected category cards, in order."""
        driver = actions.UserAction.setUp_env(self)
        # Verify Elements link is displayed
        homePage = HomePage(driver)
        # Verify all six components are displayed in home page
        record_property(" Verify Category Cards size", HomePageParameters.CATEGORYCARDS_SIZE)
        CategoryCards = homePage.find_elements(*HomePageLocators.CATEGORUCARDS)
        assert len(CategoryCards) == HomePageParameters.CATEGORYCARDS_SIZE
        record_property("Verify First Category name", HomePageParameters.ELEMENTS_TEXT)
        assert CategoryCards[0].text == HomePageParameters.ELEMENTS_TEXT
        record_property("Verify Second Category name", HomePageParameters.FORMS_TEXT)
        assert CategoryCards[1].text == HomePageParameters.FORMS_TEXT
        assert CategoryCards[2].text == HomePageParameters.ALERTS_TEXT
        assert CategoryCards[3].text == HomePageParameters.WIDGETS_TEXT
        assert CategoryCards[4].text == HomePageParameters.INTERACTIONS_TEXT
        assert CategoryCards[5].text == HomePageParameters.BOOKSTOREAPPLICATION_TEXT
        # close application
        actions.UserAction.tearDown_env(driver)
    def test_BookStoreAppPageUI(self):
        """Book Store page shows the left-panel menu items, the login button,
        the search box and the four expected book-table headers."""
        driver = actions.UserAction.setUp_env(self)
        # click button
        homePage = HomePage(driver)
        CategoryCards = homePage.find_elements(*HomePageLocators.CATEGORUCARDS)
        # card index 5 is the Book Store Application category
        CategoryCards[5].click()
        leftPanel = LeftPanelSection(driver)
        BookStoreApp_Login_Menu = leftPanel.find_element(*LeftPanelLocators.BookStoreApp_Login)
        BookStoreApp_BookStore_Menu = leftPanel.find_element(*LeftPanelLocators.BookStoreApp_BookStore)
        BookStoreApp_Profile_Menu = leftPanel.find_element(*LeftPanelLocators.BookStoreApp_Profile)
        BookStoreApp_BookStoreAPI_Menu = leftPanel.find_element(*LeftPanelLocators.BookStoreApp_BookStoreAPI)
        assert BookStoreApp_Login_Menu.text == LeftPanelParameters.LOGIN_MENU_TEXT
        assert BookStoreApp_BookStore_Menu.text == LeftPanelParameters.BOOKSTORE_MENU_TEXT
        assert BookStoreApp_Profile_Menu.text == LeftPanelParameters.PROFILE_MENU_TEXT
        assert BookStoreApp_BookStoreAPI_Menu.text == LeftPanelParameters.BOOKSTOREAPI_MENU_TEXT
        bookStoreAppPage = BookStoreApplicationPage(driver)
        assert bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.Login_button).is_displayed() == True
        assert bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.SearchBox_Input).is_displayed() == True
        assert bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.TableHeader_1).text == BookPageParameters.BOOKTABLE_HEADER_1
        assert bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.TableHeader_2).text == BookPageParameters.BOOKTABLE_HEADER_2
        assert bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.TableHeader_3).text == BookPageParameters.BOOKTABLE_HEADER_3
        assert bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.TableHeader_4).text == BookPageParameters.BOOKTABLE_HEADER_4
        # close application
        actions.UserAction.tearDown_env(driver)
    def test_SearchBook_In_BookStore(self):
        """Searching for a known term lists the expected book first."""
        driver = actions.UserAction.setUp_env(self)
        # click button
        homePage = HomePage(driver)
        CategoryCards = homePage.find_elements(*HomePageLocators.CATEGORUCARDS)
        # card index 5 is the Book Store Application category
        CategoryCards[5].click()
        #
        bookStoreAppPage = BookStoreApplicationPage(driver)
        searchbox = bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.SearchBox_Input)
        bookStoreAppPage.send_keys(searchbox, BookPageParameters.SEARCH_TEXT)
        firstBookFound = bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.BookName_1)
        assert firstBookFound.text == BookPageParameters.BOOKNAME
        # close application
        actions.UserAction.tearDown_env(driver)
    def test_SearchBook_NotFund(self):
        """Searching for a non-existent term yields no result row.

        NOTE(review): method name has a typo ("NotFund") — kept because
        renaming would change the collected test id.
        """
        driver = actions. UserAction.setUp_env(self)
        # click button
        homePage = HomePage(driver)
        CategoryCards = homePage.find_elements(*HomePageLocators.CATEGORUCARDS)
        # card index 5 is the Book Store Application category
        CategoryCards[5].click()
        bookStoreAppPage = BookStoreApplicationPage(driver)
        searchbox = bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.SearchBox_Input)
        bookStoreAppPage.send_keys(searchbox, BookPageParameters.SEARCH_TEXT_NOTFOUND)
        #
        # the first result row must be absent
        with pytest.raises(NoSuchElementException):
            firstBookFound = bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.BookName_1)
        # close application
        actions.UserAction.tearDown_env(driver)
    def test_Login(self):
        """Logging in with valid credentials shows the username in the page."""
        driver = actions.UserAction.setUp_env(self)
        # click button
        homePage = HomePage(driver)
        CategoryCards = homePage.find_elements_wait(HomePageLocators.CATEGORUCARDS)
        # card index 5 is the Book Store Application category
        CategoryCards[5].click()
        bookStoreAppPage = BookStoreApplicationPage(driver)
        login_button = bookStoreAppPage.find_element_wait(BookStoreApplicationPageLocators.Login_button)
        login_button.click()
        loginPage = LoginPage(driver)
        login_button = loginPage.find_element_wait(LoginPageLocators.Login_button)
        username_input = loginPage.find_element_wait(LoginPageLocators.UserName_Input)
        password_input = loginPage.find_element_wait(LoginPageLocators.Password_Input)
        username_input.clear()
        username_input.send_keys(UniversialParameters.USERNAME)
        password_input.clear()
        password_input.send_keys(UniversialParameters.PASSWORD)
        login_button.click()
        # after login the signed-in username is displayed on the page
        signedin_username = bookStoreAppPage.find_element(*BookStoreApplicationPageLocators.SIGNIN_USERNAME)
        assert signedin_username.text == UniversialParameters.USERNAME
        # close application
        actions.UserAction.tearDown_env(driver)
    def test_Elements_TextBox(self, record_property):
        """Elements > Text Box: all menu items exist, the form accepts input
        and the submitted values are echoed in the output section."""
        driver = actions.UserAction.setUp_env(self)
        # click button
        homePage = HomePage(driver)
        CategoryCards = homePage.find_elements_wait(HomePageLocators.CATEGORUCARDS)
        # Click on Elements tab
        CategoryCards[0].click()
        elementsPage = ElementsPage(driver)
        # all left-panel menu entries under Elements must be present
        element_menu = elementsPage.find_element_wait(ElementsPageLocators.Elements_Menu)
        textBox_menu = elementsPage.find_element_wait(ElementsPageLocators.TextBox_Menu)
        checkBox_menu = elementsPage.find_element_wait(ElementsPageLocators.CheckBox_Menu)
        radioButton_menu = elementsPage.find_element_wait(ElementsPageLocators.RadioButton_Menu)
        webTables_menu = elementsPage.find_element_wait(ElementsPageLocators.WebTables_Menu)
        buttons_menu = elementsPage.find_element_wait(ElementsPageLocators.Buttons_Menu)
        links_menu = elementsPage.find_element_wait(ElementsPageLocators.Links_Menu)
        brokenLinks_Image_menu =elementsPage.find_element_wait(ElementsPageLocators.BrokenLinks_Image_Menu)
        upload_and_download_menu = elementsPage.find_element_wait(ElementsPageLocators.UploadAndDownload_Menu)
        dynamicProperties_menu = elementsPage.find_element_wait(ElementsPageLocators.DynamicProperties_Menu)
        assert element_menu.is_displayed() == True
        assert textBox_menu.is_displayed() == True
        assert checkBox_menu.is_displayed() == True
        assert radioButton_menu.is_displayed() == True
        assert webTables_menu.is_displayed() == True
        assert buttons_menu.is_displayed() == True
        assert links_menu.is_displayed() == True
        assert brokenLinks_Image_menu.is_displayed() == True
        assert upload_and_download_menu.is_displayed() == True
        assert dynamicProperties_menu.is_displayed() == True
        record_property("Elements menu items", "TextBox, CheckBox, RadioButton, WebTable, Buttons, Links, BrokenLink_Image, Upload and Download, Dynamic Properties")
        textBox_menu.click()
        # the Text Box form fields and submit button must be visible
        fullname = elementsPage.find_element(*ElementsPageLocators.TextBox_FullName_Input)
        email = elementsPage.find_element(*ElementsPageLocators.TextBox_Email_Input)
        current_address = elementsPage.find_element(*ElementsPageLocators.TextBox_CurrentAddress_Input)
        permanent_address = elementsPage.find_element(*ElementsPageLocators.TextBox_PermanentAddress_Input)
        submit_button = elementsPage.find_element(*ElementsPageLocators.TextBox_Submit_Button)
        assert fullname.is_displayed() == True
        assert email.is_displayed() == True
        assert current_address.is_displayed() == True
        assert permanent_address.is_displayed() == True
        assert submit_button.is_displayed() == True
        record_property("TextBox", "")
        # fill the form and submit
        elementsPage.send_keys(fullname, ElementsPageParameters.FULLNAME)
        elementsPage.send_keys(email, ElementsPageParameters.EMAIL)
        elementsPage.send_keys(current_address, ElementsPageParameters.CURRENTADDRESS)
        elementsPage.send_keys(permanent_address, ElementsPageParameters.CURRENTADDRESS)
        submit_button.click()
        # the output section must echo every submitted value
        outputs = elementsPage.find_elements_wait(ElementsPageLocators.TextBox_Outputs)
        Name = outputs[0].text
        Email = outputs[1].text
        currentAddress = outputs[2].text
        permanentAddress = outputs[3].text
        assert Name.find(ElementsPageParameters.FULLNAME) > 0
        assert Email.find(ElementsPageParameters.EMAIL) > 0
        assert currentAddress.find(ElementsPageParameters.CURRENTADDRESS) > 0
        assert permanentAddress.find(ElementsPageParameters.CURRENTADDRESS) > 0
        record_property("TextBox result", "")
        # close application
        actions.UserAction.tearDown_env(driver)
def test_Elements_CheckBox(self, record_property):
    """Elements > Check Box: ticking the root 'Home' node selects every
    descendant; all selected-node labels must appear in the expected value.

    Improvement: the original spelled out 17 identical assertions for
    checkbox_result[1]..[17]; they are collapsed into one loop, with an
    explicit length check so a short result list still fails loudly.
    """
    driver = actions.UserAction.setUp_env(self)
    # click button
    homePage = HomePage(driver)
    CategoryCards = homePage.find_elements_wait(HomePageLocators.CATEGORUCARDS)
    # Click on Elements tab
    CategoryCards[0].click()
    elementsPage = ElementsPage(driver)
    checkBox_menu = elementsPage.find_element_wait(ElementsPageLocators.CheckBox_Menu)
    assert checkBox_menu.is_displayed() == True
    checkBox_menu.click()
    home_checkbox = elementsPage.find_element_wait(ElementsPageLocators.Home_checkBox)
    home_checkbox.click()
    checkbox_result = elementsPage.find_elements_wait(ElementsPageLocators.Home_checkBox_values)
    # The original indexed elements 1..17 directly; keep the same coverage.
    assert len(checkbox_result) >= 18
    for selected_node in checkbox_result[1:18]:
        assert ElementsPageParameters.HomeCheckBox_Value.find(selected_node.text) > 0
    record_property("Home CheckBox Values: ", ElementsPageParameters.HomeCheckBox_Value)
    # close application
    actions.UserAction.tearDown_env(driver)
def test_Element_RadioButton(self, record_property):
    """Elements > Radio Button: all three radios are visible; clicking 'Yes'
    then 'Impressive' updates the selection banner accordingly.

    Bug fix: the 'Impressive' check asserted the bare str.find() result.
    str.find returns -1 (truthy) when the substring is absent, so that
    assertion could never fail; it now compares against 0 like its sibling.
    """
    driver = actions.UserAction.setUp_env(self)
    # click button
    homePage = HomePage(driver)
    CategoryCards = homePage.find_elements_wait(HomePageLocators.CATEGORUCARDS)
    # Click on Elements tab
    CategoryCards[0].click()
    elementsPage = ElementsPage(driver)
    RadioButtonMenu = elementsPage.find_element_wait(ElementsPageLocators.RadioButton_Menu)
    RadioButtonMenu.click()
    YesRadioButton = elementsPage.find_element_wait(ElementsPageLocators.Yes_RadioButton)
    ImpressiveRadioButton = elementsPage.find_element_wait(ElementsPageLocators.Impressive_RadioButton)
    NoRadioButton = elementsPage.find_element_wait(ElementsPageLocators.No_RadioButton)
    assert YesRadioButton.is_displayed() == True
    assert ImpressiveRadioButton.is_displayed() == True
    assert NoRadioButton.is_displayed() == True
    YesRadioButton.click()
    RadiobuttonSelected = elementsPage.find_element_wait(ElementsPageLocators.RadioButton_Selected)
    assert RadiobuttonSelected.text.find(ElementsPageParameters.RADIOBUTTONSELECTED_YES) > 0
    ImpressiveRadioButton.click()
    RadiobuttonSelected = elementsPage.find_element_wait(ElementsPageLocators.RadioButton_Selected)
    # Fixed: was `assert RadiobuttonSelected.text.find(...)` with no comparison.
    assert RadiobuttonSelected.text.find(ElementsPageParameters.RADIOBUTTONSELECTED_IMPRESSIVE) > 0
    record_property("Element page Radio button: ", "Yes and Impressive")
    # close application
    actions.UserAction.tearDown_env(driver)
def test_Element_WebTable(self, record_property):
    """Elements > Web Tables: the seven column headers, the first three data
    rows, and the Edit/Delete action tooltips all match the fixture values.
    """
    driver = actions.UserAction.setUp_env(self)
    # click button
    homePage = HomePage(driver)
    CategoryCards = homePage.find_elements_wait(HomePageLocators.CATEGORUCARDS)
    # Click on Elements tab
    CategoryCards[0].click()
    elementsPage = ElementsPage(driver)
    elementsPage.find_element_wait(ElementsPageLocators.WebTables_Menu).click()
    assert len(elementsPage.find_elements_wait(ElementsPageLocators.TableHeaders)) == 7
    loc = ElementsPageLocators
    par = ElementsPageParameters
    # (locator, kind, expected) triples, listed in the original on-page order.
    # kind 'text' compares stripped text; 'title' compares the title attribute.
    checks = [
        (loc.FirstHeader, 'text', par.FIRSTABLEHEADER),
        (loc.SecondHeader, 'text', par.SECONDTABLEHEADER),
        (loc.ThirdHeader, 'text', par.THIRDTABLEHEADER),
        (loc.FourthHeader, 'text', par.FOURTHTABLEHEADER),
        (loc.FifthHeader, 'text', par.FIFTHTABLEHEADER),
        (loc.SixthHeader, 'text', par.SIXTHTABLEHEADER),
        (loc.SeventhHeader, 'text', par.SEVENTHTABLEHEADER),
        (loc.Row1Cell1, 'text', par.R1C1),
        (loc.Row1Cell2, 'text', par.R1C2),
        (loc.Row1Cell3, 'text', par.R1C3),
        (loc.Row1Cell4, 'text', par.R1C4),
        (loc.Row1Cell5, 'text', par.R1C5),
        (loc.Row1Cell6, 'text', par.R1C6),
        (loc.Row1Cell7_Edit, 'title', par.TableCell_EditTooltip),
        (loc.Row1Cell7_Delete, 'title', par.TableCell_DeleteTooltip),
        (loc.Row2Cell1, 'text', par.R2C1),
        (loc.Row2Cell2, 'text', par.R2C2),
        (loc.Row2Cell3, 'text', par.R2C3),
        (loc.Row2Cell4, 'text', par.R2C4),
        (loc.Row2Cell5, 'text', par.R2C5),
        (loc.Row2Cell6, 'text', par.R2C6),
        (loc.Row2Cell7_Edit, 'title', par.TableCell_EditTooltip),
        (loc.Row2Cell7_Delete, 'title', par.TableCell_DeleteTooltip),
        (loc.Row3Cell1, 'text', par.R3C1),
        (loc.Row3Cell2, 'text', par.R3C2),
        (loc.Row3Cell3, 'text', par.R3C3),
        (loc.Row3Cell4, 'text', par.R3C4),
        (loc.Row3Cell5, 'text', par.R3C5),
        (loc.Row3Cell6, 'text', par.R3C6),
        (loc.Row3Cell7_Edit, 'title', par.TableCell_EditTooltip),
        (loc.Row3Cell7_Delete, 'title', par.TableCell_DeleteTooltip),
    ]
    for locator, kind, expected in checks:
        element = elementsPage.find_element_wait(locator)
        if kind == 'text':
            assert element.text.strip() == expected
        else:
            assert element.get_attribute("title") == expected
    # close application
    actions.UserAction.tearDown_env(driver)
def test_Elements_Button(self, record_property):
    """Elements > Buttons: double-click, right-click and plain click each
    produce their confirmation message."""
    driver = actions.UserAction.setUp_env(self)
    # click button
    home = HomePage(driver)
    cards = home.find_elements_wait(HomePageLocators.CATEGORUCARDS)
    # Click on Elements tab
    cards[0].click()
    page = ElementsPage(driver)
    page.find_element_wait(ElementsPageLocators.Buttons_Menu).click()
    buttons = page.find_elements_wait(ElementsPageLocators.Buttons)
    assert len(buttons) == 3
    # Button order on the page: double-click, right-click, plain click-me.
    page.doubleClick(buttons[0])
    message = page.find_element_wait(ElementsPageLocators.DoubleClickMessage)
    assert message.text.strip() == ElementsPageParameters.DOUBLECLICKMESSAGE
    page.rightClick(buttons[1])
    message = page.find_element_wait(ElementsPageLocators.RightClickMessage)
    assert message.text.strip() == ElementsPageParameters.RIGHTCLICKMESSAGE
    buttons[2].click()
    message = page.find_element_wait(ElementsPageLocators.ClickMeMessage)
    assert message.text.strip() == ElementsPageParameters.CLICKMEMESSAGE
    # close application
    actions.UserAction.tearDown_env(driver)
def test_Elements_Link(self, record_property):
    """Elements > Links: all nine links are visible, and each API-call link
    reports the expected HTTP status response.

    Improvement: the original repeated the same click/sleep/assert sequence
    six times; the visibility checks and status checks are now data-driven.
    """
    driver = actions.UserAction.setUp_env(self)
    # click button
    actions.UserAction.AccessElementPages(driver)
    elementsPage = ElementsPage(driver)
    elementsPage.find_element_wait(ElementsPageLocators.Links_Menu).click()
    visible_links = [
        ElementsPageLocators.Links_Home,
        ElementsPageLocators.Links_HomeFa2mX,
        ElementsPageLocators.Links_Created,
        ElementsPageLocators.Links_NoContent,
        ElementsPageLocators.Links_Moved,
        ElementsPageLocators.Links_BadRequest,
        ElementsPageLocators.Links_Unauthorized,
        ElementsPageLocators.Links_Forbidden,
        ElementsPageLocators.Links_NotFound,
    ]
    for locator in visible_links:
        assert elementsPage.find_element(*locator).is_displayed() == True
    # 'Created' is checked first, immediately after the click (as originally).
    elementsPage.find_element(*ElementsPageLocators.Links_Created).click()
    response = elementsPage.find_element_wait(ElementsPageLocators.Links_Response).text
    assert response.strip() == ElementsPageParameters.LINKCREATED_RESPONSE.strip()
    # Remaining status links: click, give the async call time to land, assert.
    status_checks = [
        (ElementsPageLocators.Links_NoContent, ElementsPageParameters.LINKNOCONTENT_RESPONSE),
        (ElementsPageLocators.Links_Moved, ElementsPageParameters.LINKMOVED_RESPONSE),
        (ElementsPageLocators.Links_BadRequest, ElementsPageParameters.LINKBADREQUEST_RESPONSE),
        (ElementsPageLocators.Links_Unauthorized, ElementsPageParameters.LINKUNAUTHORIZED_RESPONSE),
        (ElementsPageLocators.Links_Forbidden, ElementsPageParameters.LINKFORBIDDEN_RESPONSE),
        (ElementsPageLocators.Links_NotFound, ElementsPageParameters.LINKNOTFOUND_RESPONSE),
    ]
    for locator, expected in status_checks:
        elementsPage.find_element_wait(locator).click()
        time.sleep(2)
        response = elementsPage.find_element_wait(ElementsPageLocators.Links_Response).text
        assert response.strip() == expected
    # close application
    actions.UserAction.tearDown_env(driver)
def test_BrokenLinks(self, record_property):
    """Elements > Broken Links - Images: the broken image and both links are
    present; following the valid link lands on the site root.

    Improvements: the three located elements were previously assigned and
    never used (and ValidLink was then re-located before clicking); they are
    now asserted visible and the existing handle is reused for the click.
    """
    driver = actions.UserAction.setUp_env(self)
    # click button
    actions.UserAction.AccessElementPages(driver)
    elementsPage = ElementsPage(driver)
    elementsPage.find_element_wait(ElementsPageLocators.BrokenLinks_Image_Menu).click()
    BrokenImage = elementsPage.find_element_wait(ElementsPageLocators.BrokenLinks_BrokenImage)
    ValidLink = elementsPage.find_element_wait(ElementsPageLocators.BrokenLinks_ValidLink)
    BrokenLink = elementsPage.find_element_wait(ElementsPageLocators.BrokenLinks_BrokenLink)
    assert BrokenImage.is_displayed() == True
    assert ValidLink.is_displayed() == True
    assert BrokenLink.is_displayed() == True
    ValidLink.click()
    assert driver.current_url == UniversialParameters.BASEURL + "/"
    # close application
    actions.UserAction.tearDown_env(driver)
def test_UploadAndDownload(self, record_property):
    """Elements > Upload and Download: the Download button saves the sample
    file to disk; the file is removed afterwards to keep runs independent."""
    driver = actions.UserAction.setUp_env(self)
    # click button
    actions.UserAction.AccessElementPages(driver)
    page = ElementsPage(driver)
    page.find_element_wait(ElementsPageLocators.UploadAndDownload_Menu).click()
    page.find_element_wait(ElementsPageLocators.Download_Button).click()
    time.sleep(5)  # give the browser time to finish writing the download
    assert os.path.exists(ElementsPageParameters.SampleFileLocator) == True
    os.remove(ElementsPageParameters.SampleFileLocator)
    # Pending on Upload files feature testing, Searching for solution.
    # close application
    actions.UserAction.tearDown_env(driver)
def test_DynamicProperties(self, record_property):
    """Elements > Dynamic Properties: the 'will enable in 5 seconds' button is
    shown, and the 'visible after 5 seconds' button appears once the page's
    five-second timer elapses."""
    driver = actions.UserAction.setUp_env(self)
    # click button
    actions.UserAction.AccessElementPages(driver)
    page = ElementsPage(driver)
    page.find_element_wait(ElementsPageLocators.DynamicProperties_Menu).click()
    enable_button = page.find_element_wait(ElementsPageLocators.Dynamic_WillEnable5Seconds_Button)
    assert enable_button.is_displayed() == True
    time.sleep(5)  # wait out the page timer before looking for the late button
    late_button = page.find_element_wait(ElementsPageLocators.Dynamic_visibleAfter5Seconds_Buttons)
    assert late_button.is_displayed() == True
    # close application
    actions.UserAction.tearDown_env(driver)
|
24,956 | c45c56e84c4e97a83b813b013e3df2121290ab27 | from polygon import polygon
from functools import lru_cache
class poly_seq:
    '''
    Lazy sequence of regular polygons that all share one circumradius.

    Construction takes the number of vertices of the largest polygon in the
    sequence and the common circumradius. Indexing yields the
    area-to-perimeter ratio of the polygon with (index + 1) sides; the actual
    geometry comes from the external `polygon` class.
    '''
    def __init__(self, edges: int, radius: 'int or float') -> None:
        '''
        Validate and store the inputs.
        self.edges -> number of sides of the largest polygon (int > 0)
        self.R -> common circumradius (int/float > 0)
        self.eff_ratio -> side-count -> ratio cache, populated by max_efficient

        Raises ValueError on a non-positive or wrongly-typed argument.
        '''
        if isinstance(edges, int) and edges > 0:
            self.edges = edges
        else:
            raise ValueError('Number of vertices for largest polygon in the sequence must be integer > 0')
        if isinstance(radius, (int, float)) and radius > 0:
            self.R = radius
        else:
            raise ValueError('Common Circumradius must be float or integer > 0')
        self.eff_ratio = {}

    def __repr__(self):
        '''
        Human-readable summary: side count of the largest polygon in the
        sequence and the common circumradius.
        '''
        s1 = 'poly_seq class instance : Largest Polygon class in the sequence has '
        s2 = f'{self.edges} sides with a common circumradius = {self.R}'
        return s1 + s2

    def __len__(self):
        '''
        Length of the sequence == side count of the largest polygon.
        '''
        return self.edges

    def __getitem__(self, idx: int) -> tuple:
        '''
        Return a (message, ratio) tuple for the polygon at `idx`.

        The polygon at index idx has (idx + 1) sides; negative indexes are
        supported Python-style (s[-1] is the last element). Indexes 0 and 1
        (1- and 2-sided "polygons") yield ratio 0 since they are degenerate.

        Raises ValueError when the (normalized) index is out of range.
        '''
        # Translate a negative index into its positive equivalent.
        if idx < 0:
            idx = self.edges + idx
        # Still-negative catches very large negative inputs (e.g. -9999).
        if idx < 0 or idx >= self.edges:
            raise ValueError(
                f'Idx Unavailable. For no: of edges = {self.edges}, available indexes are 0 to {int(self.edges - 1)}')
        # 1- and 2-sided shapes are not polygons: report a zero ratio.
        if idx < 2:
            ratio = 0
        else:
            ratio = poly_seq._calc_ratio(idx + 1, self.R)
        msg = f'Area-Perimeter ration for polygon of side {idx + 1} is {ratio}'
        return msg, ratio

    @staticmethod  # bound to the class, not the instance: no `self` needed
    @lru_cache(2 ** 10)  # memoize up to 1024 (sides, radius) pairs
    def _calc_ratio(num_edges: int, c_radius: 'int or float') -> float:
        '''
        Area/perimeter ratio for a regular polygon with `num_edges` sides and
        circumradius `c_radius`, delegating the geometry to `polygon`.
        Being a cached staticmethod, repeated calls with the same arguments
        (from any instance) reuse the previously computed value.
        '''
        poly = polygon(num_edges, c_radius)
        return poly.area / poly.perimeter

    @property
    def max_efficient(self) -> str:
        '''
        Report the polygon in the sequence with the highest area:perimeter
        ratio. Iterates every index via __getitem__ (which in turn calls the
        cached _calc_ratio), filling self.eff_ratio keyed by side count, then
        returns a descriptive string for the best entry.
        '''
        for n in range(self.edges):
            self.eff_ratio[n + 1] = self.__getitem__(n)[1]
        # max over dict keys using the stored ratios as the sort key.
        max_eff = max(self.eff_ratio, key=self.eff_ratio.get)
        s1 = f'Polygon with max efficiency for circumradius of {self.R} is one with {max_eff} sides & '
        s2 = f'Area-perimeter ratio for the same is {round(self.eff_ratio.get(max_eff), 4)}'
        return s1 + s2
|
24,957 | 8cfba44526f2968a1ed3eb29fa326d18d245a7d0 | from random import uniform
import arviz as az
import pymc3 as pm
import seaborn
import matplotlib.pyplot as plt
from two_models import make_data

# Script: fit a Bayesian straight line (b0 + b1*x) to the combined data from
# two_models.make_data, then save a trace plot and a joint posterior heatmap.

# Generate data
_, _, _, _, xs_all, ys_all = make_data()

# Infer parameters
# nsample = 500
# nchains = 2
nsample = 5000
nchains = 16
with pm.Model() as model:
    # Weakly-informative priors on intercept and slope.
    b0 = pm.Normal("b0", 0, sigma=20)
    b1 = pm.Normal("b1", 0, sigma=20)
    y_est = b0 + b1 * xs_all
    # Half-Cauchy prior keeps the noise scale positive.
    sigma = pm.HalfCauchy("sigma", beta=10, testval=1.0)
    likelihood = pm.Normal("y", mu=y_est, sigma=sigma, observed=ys_all)
    # # define prior for StudentT degrees of freedom
    # # InverseGamma has nice properties:
    # # it's continuous and has support x ∈ (0, inf)
    # nu = pm.InverseGamma("nu", alpha=1, beta=1)
    # # define Student T likelihood
    # likelihood = pm.StudentT("likelihood", mu=y_est, sigma=2, nu=nu, observed=ys_all)
    trace = pm.sample(nsample, return_inferencedata=True, chains=nchains)
print(type(trace))

# Plot: per-variable trace diagnostics.
axs = az.plot_trace(trace, figsize=(16, 12))
fig = axs[0, 0].get_figure()
fig.savefig("trace.png")
# print(trace.posterior["intercept"].stack())
plt.figure(figsize=(9, 7))
print(trace)
print(trace.posterior.mean())
# Flatten (chain, draw) into one sample dimension for plotting.
stacked = az.extract_dataset(trace)
print("HEJ")
p = seaborn.jointplot(
    x=stacked.b1.values, y=stacked.b0.values, kind="hex", color="#4CB391"
)
fig = p.fig
axs = fig.axes
fig.suptitle(trace.posterior.mean())
# p.ax_joint.collections[0].set_alpha(0)
axs[0].set_xlabel("b1")
axs[0].set_ylabel("b0")
fig.subplots_adjust(top=0.95, bottom=0.0)  # Reduce plot to make room
fig.tight_layout()
fig.savefig("heat.png")
|
24,958 | 76dac97adc34ab7fa1685a86ebaa7873ad96cd1b | import os
import sys
import pandas as pd
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()
from DataFactory import FeatureDictionary,DataParser
import numpy as np
from model import PairWiseDeepFm
import random
import pickle
from utils import *
random.seed(123)
import operator
# --- Preprocess MovieLens-100k raw files into user/item feature CSVs ---
# Improvements over the original:
#   * file handles are closed via `with` (they were left open);
#   * the inner loops that copied the already-split fields element by element
#     are replaced with a direct comprehension over the split lines;
#   * the number of trailing genre columns is derived from the item frame's
#     own width instead of a loop variable (`t_list`) left over from the last
#     parsed line, which only worked by accident.

# u.user: pipe-separated user profiles -> ./data/user_feat.csv
with open("./data/u.user") as user_file:
    df_list = [line.strip().split('|') for line in user_file]
u_df = pd.DataFrame(df_list)
u_df.columns = ['uid', 'age', 'sex', 'occupation', 'zipCode']
u_df.to_csv('./data/user_feat.csv', index=None)

# u.item: pipe-separated movie metadata -> ./data/item_feat.csv
with open('./data/u.item', encoding='ISO-8859-1') as item_file:
    i_list = [line.strip().split('|') for line in item_file]
i_df = pd.DataFrame(i_list)
columns = ['iid', 'iname', 'itime', 'null', 'iwebsite']
# Remaining columns are the genre indicator flags: name them feat0..featN.
columns += ['feat' + str(i) for i in range(i_df.shape[1] - len(columns))]
i_df.columns = columns
i_df.to_csv('./data/item_feat.csv', index=None)

# Columns excluded from the feature dictionary, and the numeric ones.
ignore_cols = ['zipCode', 'uid', 'iid', 'null', 'iwebsite', 'itime', 'iname']
numeric_cols = ['age']
feat_dict = FeatureDictionary(u_df, i_df, ignore_cols, numeric_cols)
dp = DataParser(feat_dict, u_df, i_df, ignore_cols, numeric_cols)
def evaltest(sess):
    # Evaluate the current model on held-out users: for each test user with at
    # least 10 positive items, score their candidate items, rank by score and
    # compute NDCG@10; returns the mean over (up to ~51) evaluated users.
    # Relies on module-level `dp` (DataParser) and `model` (PairWiseDeepFm).
    liens = open('./data/movielens-100k-test.txt').readlines()  # NOTE: unused (and handle not closed)
    userPosTest = pickle.load(open('./data/userTestPos.pkl', 'rb'))
    res = []
    for u in userPosTest.keys():
        # Skip users with too few positives to fill a top-10 list.
        if len(userPosTest[u]) < 10:
            continue
        user, itemp, user_feat, user_feat_val, item_feat, item_feat_val, label = dp.get_data_test(u)
        # Concatenate user and item categorical features / values side by side.
        feat_catep = np.hstack((user_feat, item_feat))
        feat_val1 = np.hstack((user_feat_val, item_feat_val))
        label1 = np.reshape(label, (len(label), 1))
        score = model.eval(sess, user, itemp, feat_catep, feat_val1, label1)[0]
        score = np.reshape(np.array(score), (-1,))
        # Rank labels by model score, highest first, and keep the top 10.
        score_label = zip(score, label)
        score_label = sorted(score_label, key=lambda x: x[0], reverse=True)
        r = [x[1] for x in score_label[:10]]
        res.append(ndcg_at_k(r))
        # Cap the evaluation at ~51 users to keep it fast.
        if len(res) > 50:
            break
    return np.mean(res)
# Training loop: pairwise DeepFM on MovieLens-100k, 50 epochs, batch size 32.
# Each training example carries a positive item (itemp) and a sampled
# negative item (itemn); the model optimizes a pairwise ranking loss.
# gpu_options = tf.GPUOptions(allow_growth =True)
# with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options)) as sess:
with tf.Session() as sess:
    # Learning rate 0.001; 61 and 22 are model dimension hyperparameters --
    # presumably feature-field/embedding sizes, see PairWiseDeepFm (confirm).
    model = PairWiseDeepFm(0.001, 61, 22)
    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())
    sys.stdout.flush()
    lines = open('./data/movielens-100k-train.txt', 'rb').readlines()
    batch_size = 32
    for d in range(50):
        # Reshuffle the training lines every epoch.
        random.shuffle(lines)
        epoch_size = round(len(lines) / batch_size)
        ind = 1
        time = 0  # NOTE: shadows any imported `time` module within this scope
        while ind + batch_size < len(lines):
            time += 1
            user, itemp, itemn, user_feat, user_feat_val, itemp_feat, itemp_feat_val, itemn_feat, itemn_feat_val, label = dp.get_batch_data(lines, ind, batch_size)
            ind = ind + batch_size
            # Positive-pair and negative-pair feature matrices.
            feat_catep = np.hstack((user_feat, itemp_feat))
            feat_val1 = np.hstack((user_feat_val, itemp_feat_val))
            feat_caten = np.hstack((user_feat, itemn_feat))
            feat_val2 = np.hstack((user_feat_val, itemn_feat_val))
            label = np.reshape(label, (len(label), 1))
            loss = model.fit(sess, user, itemp, itemn, feat_catep, feat_val1, feat_caten, feat_val2, label)
            # Periodic progress logging; every 300 steps also run evaluation.
            if time % 100 == 0:
                print('Epoch %d Global_step %d\tTrain_loss: %.4f' % (d, time, loss))
                sys.stdout.flush()
            if time % 300 == 0:
                print('Epoch %d Global_step %d\tTrain_loss: %.4f\tEval_NDCG@10: %.4f' % (d, time, loss, evaltest(sess)))
                sys.stdout.flush()
|
24,959 | 9a2186d83be9b3b6838e9152f14d532188aa52da | import numpy as np
from sklearn.datasets import load_iris
from sklearn.ensemble import RandomForestClassifier
if __name__ == '__main__':
    # Manual 10-fold cross-validation of a RandomForest on the iris dataset.
    np.random.seed(12345)
    iris = load_iris()
    X = iris.data
    y = iris.target
    model = RandomForestClassifier()
    # One shuffled pass over the sample indices defines all the folds.
    shuffled = np.arange(X.shape[0])
    np.random.shuffle(shuffled)
    k_fold = 10
    fold_len = X.shape[0] / float(k_fold)
    total = 0
    for fold in range(0, k_fold):
        lo = int(fold * fold_len)
        hi = int((fold + 1) * fold_len)
        # Test fold is the [lo, hi) slice; training set is everything else.
        test_idx = shuffled[lo:hi]
        train_idx = np.concatenate((shuffled[0:lo], shuffled[hi:]))
        clf = model.fit(X[train_idx], y[train_idx])
        total += clf.score(X[test_idx], y[test_idx])
    print("Average Cross Validation Score", total / k_fold)
|
24,960 | 0c2c4cb8fdafe0f3d968a2d8c7fffcbd654f4a2b | #!/usr/bin/env python
import roslib
roslib.load_manifest('path_planner')
import rospy
import actionlib
from uf_common.msg import MoveToAction, MoveToGoal
from visualization_msgs.msg import Marker,MarkerArray
from nav_msgs.msg import Odometry
from std_msgs.msg import Header,ColorRGBA
from sensor_msgs.msg import Image,PointCloud2,PointField
from geometry_msgs.msg import Pose,Quaternion,Point,PointStamped
from path_planner.msg import TraverseBuoysAction
import random,numpy,threading,math,time
from helper_functions import distance, center_of_points, find_pair_center, max_dist, get_perp, three_d
from uf_common.orientation_helpers import lookat, get_perpendicular,PoseEditor
# Module-level state for the buoy-traversal node. Python 2 / rospy script.
rospy.init_node('buoy_repulsor')
# NOTE: `global` at module scope is a no-op; kept for documentation value.
global current_position,channel_width,end_position,ecef_position,avoid
channel_width = 3        # max red/green buoy spacing (meters, presumably) for a valid gate
avoid = False            # set True while dodging an obstacle (see pointcloud_callback)
current_position = [0,0]           # boat x,y from /odom
end_position = [0,0,0]             # task endpoint from /task_waypoints (ECEF)
ecef_position = [1000,10000,0]     # boat position from GPS (ECEF)
# Action client used by all waypoint commands below.
waypoint = actionlib.SimpleActionClient('moveto', MoveToAction)
print 'connecting to action client'
#waypoint.wait_for_server()
def find_closest_buoy(msg,color):
    '''Return [found, [x, y]] for the marker of the given color nearest the
    boat's current position; [False, [0, 0]] when no marker matches.'''
    global current_position,green_buoy,red_buoy
    closest = []
    for marker in msg.markers:
        if marker.color != color:
            continue
        candidate = [marker.pose.position.x, marker.pose.position.y]
        # Adopt the first match, then keep whichever is nearer the boat.
        if (not closest) or distance(candidate, current_position) < distance(closest, current_position):
            closest = candidate
    if closest:
        return [True, closest]
    return [False, [0, 0]]
def find_best_pair_center(msg):
    '''Find the red/green buoy pair forming the best gate.

    A valid pair is one red and one green buoy spaced more than 1.0 and less
    than channel_width apart. Among valid pairs, pick the one whose midpoint
    is closest to the boat. Returns [True, [red_xy, green_xy]] or
    [False, [0, 0]] when no valid pair exists.

    Bug fix: the original never updated min_dist inside the loop, so `ans`
    ended up being the LAST valid pair scanned rather than the nearest one.
    '''
    global current_position
    red = ColorRGBA(1.0,0,0,1.0)
    green = ColorRGBA(0,1.0,0,1.0)
    green_buoy = []
    red_buoy = []
    ans = []
    min_dist = 100  # search radius cap (meters, presumably)
    for marker in msg.markers:
        if (marker.color == red):
            red_buoy.append([marker.pose.position.x,marker.pose.position.y])
        if (marker.color == green):
            green_buoy.append([marker.pose.position.x,marker.pose.position.y])
    for i in red_buoy:
        for j in green_buoy:
            dist = distance(i,j)
            center_dist = distance(current_position,center_of_points((i,j)))
            if (dist > 1.0 and dist < channel_width and center_dist < min_dist):
                min_dist = center_dist  # track the best midpoint distance so far
                ans = [i,j]
    if (not ans):
        return [False,[0,0]]
    else:
        return [True,ans]
def send_waypoint_wait(point,orientation):
    # Command a relative (x, y) move at 0.2 m/s and block until it completes.
    # NOTE(review): `orientation` is accepted but never used -- confirm intent.
    waypoint.send_goal_and_wait(current_pose_editor.relative(numpy.array([point[0], point[1], 0])).as_MoveToGoal(speed = .2))

def send_waypoint(point,orientation):
    # Same relative move as above, but non-blocking (fire and forget).
    # NOTE(review): `orientation` is accepted but never used -- confirm intent.
    waypoint.send_goal(current_pose_editor.relative(numpy.array([point[0], point[1], 0])).as_MoveToGoal(speed = .2))
def buoy_callback(msg):
    # Main navigation reaction to the buoy marker array. Only acts while the
    # action server has set `running` and no obstacle-avoidance is in progress.
    # Strategy: prefer steering through the best red/green gate; otherwise
    # sidestep whichever single colored buoy is visible.
    global running,avoid
    if (running and not(avoid)):
        global current_position
        red = ColorRGBA(1.0,0,0,1.0)
        green = ColorRGBA(0,1.0,0,1.0)
        red_pos = find_closest_buoy(msg,red)
        green_pos = find_closest_buoy(msg,green)
        goal = find_best_pair_center(msg)
        # The triple-quoted block below is an older strategy kept disabled.
        '''
        if (green_pos[0] and red_pos[0]):
            goal = find_pair_center(red_pos[1],green_pos[1])
            if (goal[0]):
                mid_goal = goal[1] + 1.5*get_perp(red_pos[1],green_pos[1])
                print 'going to center of channel', mid_goal
                waypoint.send_goal_and_wait(current_pose_editor.look_at_without_pitching(current_pose_editor.relative(numpy.array([mid_goal[0].mid_goal[1],0])).position))
                send_waypoint_wait(mid_goal,0)
                waypoint.send_goal_and_wait(current_pose_editor.look_at_without_pitching(goal[1]))
                waypoint.send_goal(current_pose_editor.forward(3).as_MoveToGoal(speed = .4))
        '''
        if (goal[0]):
            # Gate found: line up on a staging point offset from the gate
            # midpoint, drive there, re-align on the midpoint, then push
            # through open-loop.
            point = center_of_points((goal[1][0],goal[1][1]))
            mid_goal = point + .5*get_perp(goal[1][0],goal[1][1])
            print 'goal',goal,'mid_goal',mid_goal
            print 'going to center of channel', mid_goal
            waypoint.send_goal_and_wait(current_pose_editor.look_at_rel_without_pitching(current_pose_editor.relative(numpy.array([mid_goal[0],mid_goal[1],0])).position))
            print 'aligned'
            print 'going for mid_goal',mid_goal
            send_waypoint_wait(mid_goal,0)
            print 'align again'
            waypoint.send_goal_and_wait(current_pose_editor.look_at_rel_without_pitching([point[0],point[1],0]))
            print 'open loop'
            waypoint.send_goal(current_pose_editor.forward(1).as_MoveToGoal(speed = .2))
            print 'done'
        elif(green_pos[0]):
            # Only a green buoy: offset half a meter to pass it.
            print 'going to green buoy: ',green_pos[1]
            #waypoint.send_goal_and_wait(current_pose_editor.look_at_without_pitching([green_pos[1][0],(green_pos[1][0] - .5),0]))
            send_waypoint((green_pos[1][0],green_pos[1][1] - .5),0)
            rospy.sleep(1)
        elif(red_pos[0]):
            # Only a red buoy: offset half a meter the other way.
            print 'going to red buoy: ',red_pos[1]
            #waypoint.send_goal_and_wait(current_pose_editor.look_at_without_pitching([red_pos[1][0],(red_pos[1][0] + .5),0]))
            send_waypoint((red_pos[1][0],red_pos[1][1] + .5),0)
            rospy.sleep(1)

rospy.Subscriber('buoy_markers',MarkerArray,buoy_callback)
#-----------------------------------------------------------------------------------------
def pointcloud_callback(msg):
    # Obstacle-avoidance hook for 3D point clouds. The implementation is
    # currently disabled (the triple-quoted block is a no-op string), so this
    # callback does nothing beyond checking the `running` flag.
    if (running):
        '''
        global avoid
        dist = .5
        cloud = pointcloud2_to_xyz_array(msg)
        if (len(cloud) > 0):
            for i in cloud:
                if (i[0] < dist or i[1] < dist):
                    avoid = True
                    print "avoiding obstacle!"
                    if (i[1] < 0):
                        waypoint.send_goal_and_wait(current_pose_editor.left(.2).as_MoveToGoal(speed = .8))
                    else:
                        waypoint.send_goal_and_wait(current_pose_editor.right(.2).as_MoveToGoal(speed = .8))
        else:
            avoid = False
        '''

rospy.Subscriber("/cloud_3d",PointCloud2,pointcloud_callback)
#-----------------------------------------------------------------------------------------
def pose_callback(msg):
    # Track the boat's pose from odometry; keeps a PoseEditor for building
    # relative waypoint goals and a plain (x, y) tuple for distance checks.
    global current_position,current_pose_editor
    current_pose_editor = PoseEditor.from_Odometry(msg)
    current_position = (msg.pose.pose.position.x,msg.pose.pose.position.y)

rospy.Subscriber('/odom', Odometry, pose_callback)

def ecef_callback(msg):
    # Latest GPS fix in ECEF coordinates (used for the end-of-task check).
    global ecef_position
    ecef_position = [msg.point.x,msg.point.y,msg.point.z]

rospy.Subscriber('/gps2_parser/pos', PointStamped, ecef_callback)

def task_pos_callback(msg):
    # Task endpoint published by the mission planner (ECEF, presumably --
    # it is compared directly against ecef_position; confirm frames match).
    global end_position
    end_position = [msg.point.x,msg.point.y,msg.point.z]

rospy.Subscriber("/task_waypoints",PointStamped,task_pos_callback)
#-----------------------------------------------------------------------------------------
class TraverseBuoysServer:
    '''Action server for the buoy-traversal task.

    Navigation itself happens in buoy_callback; this server only gates it by
    toggling the module-level `running` flag while a goal is active.
    '''
    def __init__(self):
        self.server = actionlib.SimpleActionServer('traverse_buoys', TraverseBuoysAction, self.execute, False)
        global running
        # Make sure nothing is in motion until a goal actually arrives.
        waypoint.cancel_goal()
        running = False
        self.server.start()
        print "path_planner server started"

    def execute(self,goal):
        # Keep `running` asserted until the client preempts; succeed only if
        # the boat ended up within 5 m of the task endpoint.
        global running
        #(numpy.linalg.norm(numpy.array(ecef_position)-numpy.array(end_position)) > 5) and
        # NOTE(review): the endpoint-distance termination condition above is
        # commented out, so this loop busy-waits (running=True each pass) and
        # only exits on preemption -- confirm that is intentional.
        while ( not(self.server.is_preempt_requested())):
            running = True
        running = False
        if (numpy.linalg.norm(numpy.array(ecef_position)-numpy.array(end_position)) < 5):
            self.server.set_succeeded()
        else:
            self.server.set_preempted()
# Instantiate the action server, then spin until shutdown; cancel any goal
# still in flight on exit.
server = TraverseBuoysServer()
rospy.spin()
waypoint.cancel_goal()
|
24,961 | 3a525a36c2188a074229dbf7969e0b444d92a3ed | try:
from setuptools import setup
except ImportError:
from distutils.core import setup
from distutils.errors import DistutilsSetupError
# Package metadata constants consumed by the setup() call below.
DESCRIPTION = 'pyBaseX: a Python adapter for BaseX REST interface'
LONG_DESCRIPTION = """
pyBaseX: a Python adapter for BaseX REST interface
--------------------------------------------------
pyBaseX is a Python package that provides functionality to interact with BaseX via REST interface.
The main features include:
* CRUD methods for databases and documents
* XPATH queries execution
"""
AUTHOR_INFO = [
    ('Luca Lianas', 'lucalianas@gmail.com')
]
MAINTAINER_INFO = AUTHOR_INFO
# Bug fix: these two lines were `AUTHOR = '', ''.join(...)` -- a misplaced
# comma that built a 2-tuple ('', 'name') instead of a joined string. They now
# mirror the MAINTAINER lines below, which use the intended ", ".join(...).
AUTHOR = ", ".join(t[0] for t in AUTHOR_INFO)
AUTHOR_EMAIL = ", ".join("<%s>" % t[1] for t in AUTHOR_INFO)
MAINTAINER = ", ".join(t[0] for t in MAINTAINER_INFO)
MAINTAINER_EMAIL = ", ".join("<%s>" % t[1] for t in MAINTAINER_INFO)
URL = 'https://github.com/lucalianas/pyBaseX'
DOWNLOAD_URL = 'https://github.com/lucalianas/pyBaseX/releases'
# Read the package name and version from sibling NAME/VERSION files so they
# live in one place; abort setup with a clear error if either is missing.
try:
    with open("NAME") as f:
        NAME = f.read().strip()
    with open("VERSION") as f:
        VERSION = f.read().strip()
except IOError:
    raise DistutilsSetupError("failed to read name/version info")

setup(
    name=NAME,
    version=VERSION,
    description=DESCRIPTION,
    long_description=LONG_DESCRIPTION,
    author=AUTHOR,
    author_email=AUTHOR_EMAIL,
    url=URL,
    download_url=DOWNLOAD_URL,
    license='MIT License',
    platforms=['any'],
    keywords=['BaseX', 'REST', 'HTTP', 'XPATH', 'XML', 'database', 'python'],
    maintainer=MAINTAINER,
    maintainer_email=MAINTAINER_EMAIL,
    classifiers=[
        'Development Status :: 3 - Alpha',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.7',
        'Intended Audience :: Developers',
    ],
    packages=[
        'pybasex',
        'pybasex.utils',
    ],
    # `requires` is the legacy distutils field; install_requires is what
    # setuptools/pip actually act on -- both are declared for compatibility.
    requires=[
        'setuptools',
        'requests',
        'lxml',
    ],
    install_requires=[
        'setuptools',
        'requests',
        'lxml',
    ]
)
|
24,962 | 92dbeab7f2eb634e724465473e75e1b72f1b4bcd | import pytest
from datetime import datetime
from uctl2_back.team_state import TeamState, TransitionTime
from uctl2_back.stage import Stage
@pytest.fixture
def team_state():
    """Fresh TeamState for team id 1 named 'foo'."""
    return TeamState(1, 'foo')
@pytest.fixture
def stages():
    """Five contiguous 100m stages covering 0..500m.

    Args appear to be (index, name, dst_from_start, length, timed-flag) --
    inferred from usage below; confirm against the Stage constructor.
    """
    return [
        Stage(0, '', 0, 100, True),
        Stage(1, '', 100, 100, False),
        Stage(2, '', 200, 100, True),
        Stage(3, '', 300, 100, False),
        Stage(4, '', 400, 100, True),
    ]
def test_constructor():
with pytest.raises(ValueError):
TeamState(-1, '')
with pytest.raises(ValueError):
TeamState(0, '')
def test_update_covered_distance_should_RaiseValueError_when_GivenNegativePace(team_state, stages):
with pytest.raises(ValueError):
team_state.update_covered_distance(stages, 34, 89, -7)
with pytest.raises(ValueError):
team_state.update_covered_distance(stages, 798, 16, 0)
def test_update_covered_distance_when_TeamNotStartYet(team_state, stages):
team_state.update_covered_distance(stages, 0, 0.0)
assert 0 == team_state.covered_distance
def test_update_covered_distance_when_TeamIsInFirstStage(team_state, stages):
team_state.start_time = datetime(2020, 4, 21)
team_state.update_covered_distance(stages, 4, 60, default_pace=240)
assert 1000 == team_state.covered_distance
def test_update_covered_distance_when_TeamFinished(team_state, stages):
team_state.start_time = datetime(2020, 4, 21)
team_state.team_finished.set_value(True)
race_length = stages[-1].dst_from_start + stages[-1].length
team_state.update_covered_distance(stages, 4, 60)
assert race_length == team_state.covered_distance
def test_update_covered_distance_when_TeamChangedStage(team_state, stages):
team_state.current_stage.set_value(0)
team_state.current_stage.set_value(4)
team_state.start_time = datetime.now()
team_state.split_times = [ 10, 10, 10 ]
team_state.update_covered_distance(stages, 40, 1)
assert 400 == team_state.covered_distance
def test_update_covered_distance(team_state, stages):
team_state.current_stage.set_value(1)
team_state.current_stage.set_value(1)
team_state.current_time_index = 0
team_state.covered_distance = 100
team_state.start_time = datetime(2020, 4, 21, hour=10)
team_state.split_times = [ 24 ]
# with a pace of 240 secondes for 1km, we have 24 secondes for 100m
# it the length of all stages
team_state.intermediate_times = [
datetime(2020, 4, 21, hour=10, second=24)
]
team_state.update_covered_distance(stages, 4, 60)
assert 1100 == team_state.covered_distance
def test_update_stage_times(team_state, stages):
inter1 = datetime(2020, 4, 21, hour=12)
inter2 = datetime(2020, 4, 21, hour=14)
transition_times = [
TransitionTime(relative_index=1, split_time=3600, inter_time=inter1),
TransitionTime(relative_index=2, split_time=3600, inter_time=inter2)
]
random_date = datetime(year=2020, month=4, day=21)
team_state.intermediate_times = [random_date, random_date, random_date, random_date]
team_state.split_times = [0, 0, 0, 0]
team_state.stage_ranks = [4, 4, 4, 4]
team_state.update_stage_times(transition_times)
assert team_state.intermediate_times == [random_date, inter1, random_date, inter2, random_date, random_date]
assert team_state.split_times == [0, 3600, 0, 3600, 0, 0]
|
24,963 | 083740632159c594e0382e953c63fc47cf43d2a3 | ii = [('GodwWSL2.py', 1), ('RennJIT.py', 1), ('LyttELD.py', 1), ('MartHRW.py', 1), ('MackCNH.py', 1), ('BellCHM.py', 1)] |
24,964 | b2d7ccf5c028aaad6af6cf9b64aab01ec051bf6b | import inspect
import sqlite3

# In-memory scratch database shared by every run() call.
conn = sqlite3.connect(":memory:")
c = conn.cursor()


def run(fn):
    """Extract an embedded 'sql:' line from *fn*'s source, rewrite and execute it.

    The statement is taken from the first source line of *fn* that starts
    with ``sql:`` (e.g. inside its docstring). The pseudo-keyword ``frm`` is
    translated to ``from`` and ``" . "`` separators are collapsed, a trailing
    semicolon is ensured, then the statement and each result row are printed
    and the connection is committed.

    Raises:
        ValueError: if *fn* contains no ``sql:`` line.
    """
    for line in inspect.getsourcelines(fn)[0]:
        if line.strip().startswith("sql:"):
            sql = line.strip()[5:].strip()
            break
    else:
        # Bug fix: previously `sql` stayed unbound here and execution died
        # later with a confusing NameError; fail explicitly instead.
        raise ValueError("no 'sql:' line found in %r" % fn)
    sql = sql.replace(" . ", " ").split(" ")
    sql = " ".join(w if w != "frm" else "from" for w in sql)
    sql += "" if sql.endswith(";") else ";"
    print(sql)
    print("-" * 10)
    for row in c.execute(sql):
        print(row)
    print("")
    conn.commit()
|
24,965 | 85eea6a4386340ebd9f2ff1bd845f9d0627ec4d7 | import requests
from templates.text import TextTemplate
from random import choice
import json
import config
def process(input, entities=None):
    """Return a random pick-up line wrapped in a TextTemplate message.

    :param input: the raw user message; echoed back in the result.
    :param entities: optional NLP entities (unused).
    :return: dict with 'input', 'output' and 'success' keys on success,
             or just {'success': False} when anything goes wrong.
    """
    output = {}
    try:
        with open(config.PICKUP_SOURCE_FILE) as pickup_file:
            # Bug fix: the original line was a SyntaxError
            # (`pick up lines = json.load(pickuplines_file)`) and referenced
            # an undefined file-handle name.
            pickup = json.load(pickup_file)
        pickup_list = pickup['pick up lines']
        output['input'] = input
        output['output'] = TextTemplate(choice(pickup_list)).get_message()
        output['success'] = True
    except Exception:
        # Best-effort by design: any failure (missing file, bad JSON, ...)
        # is reported as success=False rather than crashing the caller.
        output['success'] = False
    return output
|
24,966 | a34bc43632a160df2c7038d44ca8a3da6fa58f56 | #逢7就跳
for i in range(1,101):
if int(i)%7==0 or int(i)%10==7 or int(i)//10==7:
continue
else:
print(i) |
24,967 | 54effd1b347f9fe59bf666869ac76ff6db9ca19d | import random
import re
from selenium.webdriver.common.proxy import ProxyType
import requests
import time
from bs4 import BeautifulSoup
from selenium import webdriver
from threading import Thread
import queue
from HTML_Decoding.gethtmlTostring import filter_tags
#Fetch proxy IP addresses from the Xici service; two parameters: page size and proxy
from selenium.webdriver.support.wait import WebDriverWait
#Check whether the text contains an IP address
def ip_exist(text):
    """Return True when *text* contains a standalone dotted-quad IPv4 address."""
    # The look-behind/look-ahead reject candidates embedded in longer
    # digit/dot runs (e.g. "1.2.3.4.5" does not match).
    ipv4_pattern = re.compile(r'(?<![\.\d])(?:\d{1,3}\.){3}\d{1,3}(?![\.\d])')
    return ipv4_pattern.search(text) is not None
def getdaxiangdailiIP():
    """Fetch one usable HTTP proxy IP from the daxiangdaili API via PhantomJS.

    Polls the API endpoint until the returned page text contains a
    dotted-quad address (validated by ip_exist), then returns that text.
    NOTE(review): the PhantomJS driver is never quit, so each call leaks a
    browser process -- confirm whether the caller cleans it up.
    """
    driver = webdriver.PhantomJS(
        executable_path=r'C:\Users\wangquan\phantomjs\bin\phantomjs.exe')
    # Set the page-load timeout (seconds)
    daxiangurl="http://tvp.daxiangdaili.com/ip/?tid=556249540865397&num=1&protocol=http"
    driver.set_page_load_timeout(5)
    # Restore the system (direct) proxy settings
    proxy = webdriver.Proxy()
    proxy.proxy_type = ProxyType.DIRECT
    # Proxy IP address
    # Add the proxy settings to webdriver.DesiredCapabilities.PHANTOMJS
    proxy.add_to_capabilities(webdriver.DesiredCapabilities.PHANTOMJS)
    driver.start_session(webdriver.DesiredCapabilities.PHANTOMJS)
    time.sleep(1.5)
    Retuenip=""
    while True:
        driver.get(daxiangurl)
        # Strip HTML tags from the response body before matching.
        Retuenip=filter_tags(driver.page_source)
        if ip_exist(Retuenip):
            # Runtime log strings intentionally kept in Chinese.
            print("正确获取ip地址-开始爬虫:"+Retuenip)
            break
        else:
            print("获取ip地址失败正在重新获取")
            time.sleep(1.5)
    #Return the harvested IP address
return Retuenip |
24,968 | 00c72197246b81a4995ca7fd40e11c3aaf66523d | from django.db import models
# Create your models here.
class User(models.Model):
    """Map marker record: a named user with a status, a size and a position."""
    # Display name (max 20 characters).
    Name = models.CharField(max_length=20)
    # Free-form status text (max 500 characters).
    Status = models.CharField(max_length=500)
    # Marker size; units are not specified here -- TODO confirm with callers.
    Size = models.FloatField()
    # Geographic coordinates of the marker (Longitude follows below).
    Latitude = models.FloatField()
Longitude = models.FloatField() |
24,969 | 0d013913be6b35ec5d971bfc5f6c1bd210dc0d2f | #!/usr/bin/env python3
from MenuOptions import MenuOptions
def main():
    """Run the interactive menu loop until the user confirms exit (option 5)."""
    menu_options = MenuOptions.instance()
    opcao = 0
    while opcao < 5:
        menu_options.show_menu()
        opcao = int(input("Digite a opção:"))
        if opcao == 1:
            menu_options.option_um()
        elif opcao == 2:
            menu_options.option_dois()
        elif opcao == 3:
            menu_options.option_tres()
        elif opcao == 4:
            menu_options.option_quatro()
        elif opcao == 5:
            res = input("Deseja realmente sair?")
            if res.upper() == "S":
                break
            # Bug fix: declining the exit prompt used to fall out of the loop
            # anyway (opcao == 5 fails the `< 5` test). Reset so the menu is
            # shown again.
            opcao = 0
main() |
24,970 | af699abfbeac7278917acd0ea073726308de6705 | import tarfile
import pathlib
import tempfile
from .distinfo import iter_files
from .wheel import create_dist_info, create_wheel
_NAME = "my_package"
_VERSION = "2"
_TAG = "py3-none-any"
_PACKAGE = pathlib.Path("my_package")
def build_wheel(
    wheel_directory, config_settings=None, metadata_directory=None,
):
    """PEP 517 hook: build a wheel for the package and return its filename.

    When *metadata_directory* is given, the prepared .dist-info directory is
    reused; otherwise a fresh one is generated inside a temporary directory.
    """
    with tempfile.TemporaryDirectory() as scratch:
        if metadata_directory is not None:
            dist_info = pathlib.Path(metadata_directory)
        else:
            dist_info = create_dist_info(
                _NAME, _VERSION, _TAG, _PACKAGE, pathlib.Path(scratch),
            )
        out_dir = pathlib.Path(wheel_directory)
        wheel_path = create_wheel(
            _NAME, _VERSION, _TAG, _PACKAGE, dist_info, out_dir,
        )
    return wheel_path.name
def build_sdist(sdist_directory, config_settings=None):
    """PEP 517 hook: build a source distribution and return its filename.

    Packs the package directory, this build backend's own directory and
    pyproject.toml into a PAX-format .tar.gz inside *sdist_directory*.
    """
    # Directory containing this build backend's modules.
    packager = pathlib.Path(__file__).resolve().parent
    sdist_path = pathlib.Path(sdist_directory, f"{_NAME}-{_VERSION}.tar.gz")
    with tarfile.open(sdist_path, "w:gz", format=tarfile.PAX_FORMAT) as tf:
        for path, relative in iter_files((_PACKAGE, packager)):
            tf.add(path, relative.as_posix())
        # pyproject.toml is resolved relative to the current working
        # directory -- assumes the build front-end runs from the project
        # root (standard PEP 517 behaviour).
        tf.add("pyproject.toml")
    return sdist_path.name
|
24,971 | e2f101b7acad8b095d60119053eadfd0114d7323 | from fairseq.models.roberta import RobertaModel
from examples.roberta.wsc import wsc_utils # also loads WSC task and criterion
from examples.roberta.wsc import wsc_task
#roberta = RobertaModel.from_pretrained('checkpoints', 'checkpoint_best.pt', 'WSC/')
# Load the pretrained RoBERTa-large WSC checkpoint from local storage.
roberta = RobertaModel.from_pretrained('/u/scr/mhahn/PRETRAINED/roberta.large.wsc', "model.pt", "/juicier/scr120/scr/mhahn/PRETRAINED/WSC/")
roberta.cuda()
# Evaluate pronoun-disambiguation accuracy on the WSC validation split.
nsamples, ncorrect = 0, 0
for sentence, label in wsc_utils.jsonl_iterator('/juicier/scr120/scr/mhahn/PRETRAINED/WSC/val.jsonl', eval=True):
    pred = roberta.disambiguate_pronoun(sentence)
    print(sentence)
    print(pred)
    nsamples += 1
    if pred == label:
        ncorrect += 1
    # NOTE(review): this break stops after the FIRST example, so the printed
    # accuracy reflects one sample only -- looks like a debugging leftover;
    # confirm before trusting the number in the comment below.
    break
print('Accuracy: ' + str(ncorrect / float(nsamples)))
# Accuracy: 0.9230769230769231
|
24,972 | 3c5d5933b5af96b1d1e6b4c0d5e3f8f021614b18 | # Introduction to Programming
# Project 1
# 16 October 2020
def and_g(a, b):
    """Logical AND gate for binary (0/1) inputs.

    Returns 1 only when both inputs are 1, otherwise 0. Inputs greater
    than 1 are reported as invalid and None is returned, matching the
    validation style of the other gates in this module.
    """
    if a > 1 or b > 1:
        print("Number entered is not valid.")
        return None
    # Bug fix: the original returned `a` unconditionally, so and_g(1, 0)
    # wrongly yielded 1.
    return 1 if a == 1 and b == 1 else 0
def or_g(a, b):
    """Logical OR gate for binary (0/1) inputs: returns 1 if either is 1.

    Inputs greater than 1 are reported as invalid and None is returned.
    Bug fix: the original validation printed a warning but fell through and
    could still return a value for invalid input.
    """
    if a > 1 or b > 1:
        print("Number entered is not valid.")
        return None
    # Same result as the original a>=b / b>=a comparison for 0/1 inputs.
    return a if a >= b else b
def not_g(a):
    """Logical NOT gate: invert a binary input (1 -> 0, anything else -> 1)."""
    return 0 if a == 1 else 1
def nand_g(a, b):
    """Logical NAND gate: 0 only when both inputs are 1, otherwise 1."""
    return 0 if (a == 1 and b == 1) else 1
def xor_g(a, b):
    """Logical XOR gate: 1 when exactly one binary input is 1.

    Bug fix: the original returned 1 only when BOTH inputs were 1 -- that
    is the truth table for AND, not XOR.
    """
    return 1 if a != b else 0
|
24,973 | 4f550ccd0672f39de52c3fbc336d93c372d0a3a4 | '''
Tensorflow operations that serve as the nodes in the computational graph
Nelson Chen 12/13/16 for Beer Recommendation Project
'''
####################################### Packages ###########################################
import tensorflow as tf
####################################### Functions ###########################################
def tf_repeat(tens,num):
    'Broadcast vectors to matrices for element wise multiplication'
    # NOTE(review): this module uses the pre-1.0 TensorFlow API elsewhere
    # (tf.mul / tf.sub), so it is pinned to a legacy TF release.
    # Pinned to CPU; converts a length-N vector into an N x num matrix.
    with tf.device("/cpu:0"):
        tens = tf.expand_dims(tens, 1) # convert column vector to N x 1 matrix
        tens_repeated = tf.tile(tens, [1, num]) #repeat vector to expand to N x num matrix
    return tens_repeated
def get_pred(feedback_u, item_num, user_num, dim, device):
with tf.device(device):
with tf.variable_scope("var", reuse=True):
bias_global_var = tf.get_variable("bias_global", shape=[])
w_bias_user = tf.get_variable("embd_bias_user", shape=[user_num])
w_bias_item = tf.get_variable("embd_bias_item", shape=[item_num])
w_user = tf.get_variable("embd_user", shape=[user_num, dim])
w_item = tf.get_variable("embd_item", shape=[item_num, dim])
w_feedback = tf.get_variable("feedback", shape=[item_num, dim])
bias_i_mat = tf.transpose(tf_repeat(w_bias_item, user_num))
bias_u_mat = tf_repeat(w_bias_user, item_num)
feedback_vecs = tf.matmul(tf.matmul(feedback_u, w_feedback), tf.transpose(w_item))
ratings = tf.matmul(w_user, tf.transpose(w_item))
ratings = tf.add(ratings,tf.mul(tf.constant(1.0, shape=[user_num, item_num]), bias_global_var))
ratings = tf.add(ratings, bias_i_mat)
ratings = tf.add(ratings, bias_u_mat)
ratings = tf.add(ratings, feedback_vecs)
return ratings
def inference_svd(user_batch, item_batch, feedback_batch, user_num, item_num, dim=5, device="/cpu:0"):
'Build inference part to the training algorithm'
# CPU needed for this part
with tf.device("/cpu:0"):
with tf.variable_scope("var"):
# Defining variables
bias_global = tf.get_variable("bias_global", shape=[])
w_bias_user = tf.get_variable("embd_bias_user", shape=[user_num])
w_bias_item = tf.get_variable("embd_bias_item", shape=[item_num])
w_user = tf.get_variable("embd_user", shape=[user_num, dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
w_item = tf.get_variable("embd_item", shape=[item_num, dim],
initializer=tf.truncated_normal_initializer(stddev=0.02))
w_feedback = tf.get_variable("feedback", shape=[item_num,dim],
initializer = tf.truncated_normal_initializer(stddev=0.02))
# Looking up the batch part of the variables
bias_user = tf.nn.embedding_lookup(w_bias_user, user_batch, name="bias_user")
bias_item = tf.nn.embedding_lookup(w_bias_item, item_batch, name="bias_item")
embd_user = tf.nn.embedding_lookup(w_user, user_batch, name="embedding_user")
embd_item = tf.nn.embedding_lookup(w_item, item_batch, name="embedding_item")
with tf.device(device):
# Compute the feedback parameter vectors
feedback_vecs = tf.matmul(feedback_batch, w_feedback)
# Compute the implicit factors and broadcast to matrix
N_u = tf.pow(tf.reduce_sum(feedback_batch, 1), -0.5)
N_u = tf_repeat(N_u, dim)
# Calculate new user vec with implicit information
embd_user = tf.add(embd_user,tf.mul(N_u,feedback_vecs))
# Compute the inference value
infer = tf.reduce_sum(tf.mul(embd_user, embd_item), 1)
infer = tf.add(infer, bias_global)
infer = tf.add(infer, bias_user)
infer = tf.add(infer, bias_item, name="svd_inference")
# Compute the regularization term
regularizer = tf.add(tf.nn.l2_loss(embd_user), tf.nn.l2_loss(embd_item), name="svd_regularizer")
regularizer = tf.add(regularizer, tf.reduce_sum(tf.matmul(feedback_batch, tf.mul(w_feedback, w_feedback))))
regularizer = tf.add(regularizer, tf.nn.l2_loss(bias_user))
regularizer = tf.add(regularizer, tf.nn.l2_loss(bias_item))
return infer, regularizer
def optimiaztion(infer, regularizer, rate_batch, learning_rate=0.001, reg=0.1, device="/cpu:0"):
    'Optimization function to calculate cost and specify optimization parameters'
    # NOTE(review): the function name is misspelled ("optimiaztion") but is
    # kept for callers. tf.sub/tf.mul are pre-TF-1.0 API names -- this module
    # targets a legacy TensorFlow release.
    with tf.device(device):
        # Calculate the squared error
        cost_l2 = tf.nn.l2_loss(tf.sub(infer, rate_batch))
        # Calculate the cost of regularization term and final cost
        penalty = tf.constant(reg, dtype=tf.float32, shape=[], name="l2")
        cost = tf.add(cost_l2, tf.mul(regularizer, penalty))
        # Choose optimization algorithm for training
        #train_op = tf.train.AdamOptimizer(learning_rate).minimize(cost)
        train_op = tf.train.RMSPropOptimizer(learning_rate).minimize(cost)
    return cost, train_op
|
24,974 | ebccdc5be53538f404e9d81fd4fb83a7b03fd3a4 | #### Pygame Simulator ####
##########################
## Simulator
## IT 2016
##
## Andrew Wilkie, Maddie Mackey
##########################
# Imported modules
import pygame
from pygame.locals import *
import math, random
import sys#, serial
import gui, physics
import time
#from xml.etree import ElementTree # use for Google Maps
def update():
    ''' update everything for pygame'''
    # Relies on module-level globals: screen, clock, Gui, all_sprites,
    # pause_button_group, balloon, theta, yaw.
    all_sprites.draw(screen)
    pause_button_group.draw(screen)
    # Refresh the telemetry overlay with the balloon's position and angles.
    Gui.Variables(balloon.x, balloon.y, balloon.z,
                  theta, yaw)
    pygame.display.flip()
    pygame.display.update() # Update the screen to show all changes that have been made
    screen.fill((255,255,255)) # fill the spare space where sprites are not located
    clock.tick(60)  # cap the loop at 60 frames per second
class Balloon(pygame.sprite.Sprite):
""" This class represents the Balloon """
def __init__(self, altitude, x, y, home_x, home_y):
super(Balloon, self).__init__()
# balloon image
self.original_img = pygame.image.load("images/arrow.png")
self.image = self.original_img
self.rect = self.original_img.get_rect()
self.x, self.y = x, y
self.home_x, self.home_y = home_x, home_y
self.z = altitude # in meters
self.Vx, self.Vy = 1.0, 1.0
def update(self):
# update balloon location
self.rect.x, self.rect.y = convert_coords(self.x, self.y, "pygame")
def move(self, angle):
speed = 0.5 #phy.velocity(self.z) / 3.6
# convert to km/h. in other words slow the craft down
#print("angle_move", angle)
# sine and cos need to change when quadrants change... possibly
# move does not move object in the same dircetion as planned
self.Vx = math.cos(angle) * speed # + wind
self.Vy = math.sin(angle) * speed # + wind
self.x += self.Vx
self.y += self.Vy
self.image = pygame.transform.rotate(self.original_img, angle) # reversed angular rotation
#print("ang", angle)
#print("x", self.x)
#print("Y", self.y)
def compute_theta(self):
d_x = self.x - self.home_x
d_y = self.y - self.home_y
#print(d_x,d_y)
c = math.sqrt(((d_x ** 2) + (d_y ** 2))) #distance from home
#print(c)
theta = math.atan2(d_x,d_y)
theta = math.degrees(theta)
#print("theta", theta)
return theta
class Home(pygame.sprite.Sprite):
    """ This class represents the Home base (landing location)"""
    def __init__(self, lat, lon):
        # NOTE(review): despite the names, lat/lon are fed straight into
        # convert_coords as cartesian-plane x/y -- confirm intended units.
        super(Home, self).__init__()
        self.image = pygame.image.load("images/home.png")
        self.rect = self.image.get_rect()
        # Place the sprite using screen ("pygame") coordinates.
        self.rect.x, self.rect.y = convert_coords(lat, lon, "pygame")
def computePID(setpoint,
               _input, last_error, prev_time):
    """Single PID step (currently P-only; I and D contributions are tuned out).

    :param setpoint: target value.
    :param _input: measured value.
    :param last_error: error from the previous step.
    :param prev_time: timestamp (seconds) of the previous step.
    :return: (output, new_last_error, new_prev_time)
    """
    kp = 0.5   # proportional gain -- the only active term below
    kd = 5
    ki = 0.02
    current_time = time.time()
    d_time = current_time - prev_time
    error = setpoint - _input
    # Bug fix: two calls inside the same clock tick made d_time == 0 and the
    # derivative term raised ZeroDivisionError.
    d_error = (error - last_error) / d_time if d_time > 0 else 0.0
    output = kp * error  # + ki * error_sum + kd * d_error (disabled while tuning)
    return output, error, current_time
def xyzPID(home_lon, home_lat, home_alt,
           lon, lat, alt,
           error_sum, last_error_lon, last_error_lat, prev_time):
    """Run one PID step each for longitude and latitude toward the home point.

    Altitude control is not implemented yet; *alt*/*home_alt* and
    *error_sum* are accepted for interface stability but currently unused.

    :return: (output_lon, output_lat, last_error_lat, last_error_lon, prev_time)
    """
    # Bug fix: this previously called an undefined `calculatePID` (NameError)
    # and passed error_sum, which computePID does not accept.
    output_lon, last_error_lon, prev_time = computePID(home_lon, lon, last_error_lon, prev_time)
    output_lat, last_error_lat, prev_time = computePID(home_lat, lat, last_error_lat, prev_time)
    return output_lon, output_lat, last_error_lat, last_error_lon, prev_time
def computeYaw(Vx, Vy):
    """Return the heading angle (degrees, atan2 convention) of velocity (Vx, Vy).

    Bug fix: the original branch structure returned None when Vx > 0 and
    Vy == 0; since every branch computed the same atan2 expression anyway,
    the conditionals are collapsed into a single return.
    """
    return math.degrees(math.atan2(Vy, Vx))
def convert_coords(x, y, conversion):
    """Translate between screen ("pygame") and window-centered cartesian
    coordinates, using the module-level display size `width`/`height`."""
    half_w = width / 2
    half_h = height / 2
    if conversion == "cartesian":
        # screen position -> plane centered on the window middle
        sx = x - half_w
        sy = half_h + y
    elif conversion == "pygame":
        # centered plane -> screen position (y axis flipped)
        sx = x + half_w
        sy = half_h - y
    return sx, sy
# pygame setup
pygame.init()
width = 800
height = 600
screen = pygame.display.set_mode((width, height))
screen_rect = pygame.Rect((0,0),(width, height))
clock = pygame.time.Clock()
Gui = gui.GUI(screen, width, height)
pygame.display.set_caption("G(A)SP Simulator") # window title
font = pygame.font.SysFont("monospace", 11)
## pause button
pause_button = pygame.sprite.Sprite()
pause_button.image = pygame.image.load('images/pause_button.png')
pause_button.rect = pause_button.image.get_rect()
pause_button_group = pygame.sprite.GroupSingle(pause_button)
pause_button.rect.top = 10
pause_button.rect.left = 10
# other variables
finish = '' # pygame loop variable
ser = False # serial port
port = 'COM3' # name of the port
# physics variables
mass = None
area = None
angle = 0
speed = 1 # speed of object
force = None
# Object variables
home_lon, home_lat, home_alt = 0, 0, 0 # longitude = Y, latitude = X
longitude, latitude, altitude = 300, -400, 43000 # cartesian plane coordinates
#bearing = 0 # compass bearing
theta = 0
yaw = 0
# algorithm variables
error_Sum, last_error, prev_time = 0, 0, 0
output_lon, output_lat = 0, 0
# sprites
all_sprites = pygame.sprite.Group()
balloon = Balloon(altitude,latitude,longitude,home_lat,home_lon)
home = Home(home_lat,home_lon)
all_sprites.add(balloon)
all_sprites.add(home)
theta = balloon.compute_theta()
phy = physics.Physics()
# Serial port - from Arduino
"""while not ser:
try:
ser = serial.Serial(port, 9600) # name/path needs to be changed
except:
print("Not connected")"""
#Gui.TitleScreen() # Display the title screen
#pygame.time.delay(2000) # wait 2 seconds then change screens
# Main loop
while (finish != 'quit'):
# recieve data
"""while (not ser):
# Serial disconnected
screen.fill((255,255,255))
finish = Gui.Disconnected()
update()
try:
ser = serial.Serial(port, 9600) # name/path needs to be changed
except:
pass
print(ser.readline()) # read from serial port """
# interpret data
#output_lon, output_lat, last_error_lat ,last_error_lon, prev_time = xyzPID(home_lon, home_lat, home_alt,
# longitude, latitude, altitude, error_Sum, last_error_lon, last_error_lat, prev_time)
## Pause data stream
while finish == 'pause':
finish = Gui.Pause(balloon.x, balloon.y, balloon.z,
theta, yaw,
all_sprites, pause_button)
theta = balloon.compute_theta()
yaw = computeYaw(balloon.x, balloon.y)
output, last_error, prev_time = computePID(theta+180, yaw, last_error, prev_time)
if balloon.z > home_alt:
balloon.move(yaw+180)
#bearing = 90 - theta
yaw += output
#balloon.move(yaw)
balloon.z -= phy.velocity(balloon.z) # move the balloon in the Z axis (falling)
if balloon.z <= home_alt:
balloon.z = home_alt
balloon.update()
# Keyboard Events
for event in pygame.event.get():
if event.type == pygame.QUIT:
finish = "quit"
if event.type == pygame.KEYDOWN and event.key == K_ESCAPE:
finish = "pause"
if event.type == pygame.MOUSEBUTTONUP: # check if button clicked
pos = pygame.mouse.get_pos()
if pause_button.rect.collidepoint(pos): # pause
finish = "pause"
update() # update screen
pygame.quit()
## buttons, pause
# theta not changing dutringf flight
# everything in cartesian (excpet display )
|
24,975 | abfa83cf6e361c38d0f079a35bab6bcc8e2929c6 | from django.contrib import admin
from django.urls import path
from django.conf.urls import url, include
from rest_framework import routers
from ofertasBlog import views
from ofertasBlog.models import Oferta
# Router that auto-generates list/detail routes for the Oferta API.
router = routers.DefaultRouter()
router.register(r'ofertas', views.OfertaViewSet)
urlpatterns = [
    path('admin/', admin.site.urls),
    # REST API routes are mounted at the site root.
    url( r'^', include(router.urls)),
    # Login/logout views for DRF's browsable API.
    url( r'^api-auth/', include( 'rest_framework.urls', namespace='rest_framework' ))
]
|
24,976 | edc10b9ed5bdadaa526a1e6ae751572b2b76f960 | """
This modules contains miscellaneous helpers for various testing and stubbing scenarios.
Included are:
* modify_buffer_object() -- Modifies a Python buffer object to allow the user to replace weird and challenging
functions/methods like socket.recv_into()
* await_condition() -- Repeated evaluates a callable at a specified poll rate for a given amount of time, and
either returns if the callable became true quickly enough, or asserts otherwise.
"""
import ctypes
import time
def modify_buffer_object(source_buffer, dest_buffer, nbytes=None):
    """
    Modify a python object that supports the buffer interface internally.
    This is useful for testing things like socket.recv_into()
    source_buffer must not be larger than dest_buffer, or the code will assert.
    :param source_buffer: The source for the new data to be written into dest_buffer.
    :param dest_buffer: The buffer to be modified inline.
    :param nbytes: (OPTIONAL) The maximum number of bytes to write into the dest_buffer.
        If set, the number of bytes written is the minimum of nbytes and the source length.
        Default: None
    :return: The total number of bytes written into dest_buffer.
    """
    assert len(source_buffer) <= len(dest_buffer)
    # Bug fix: this used to be min(nbytes, source_buffer), comparing an int
    # against the buffer object itself (TypeError on Python 3).
    copy_len = len(source_buffer) if nbytes is None else min(nbytes, len(source_buffer))
    buffer_ptr = (ctypes.c_byte * len(dest_buffer)).from_buffer(dest_buffer)
    new_value_ptr = (ctypes.c_byte * len(source_buffer)).from_buffer_copy(source_buffer)
    # Bug fix: the source must be sliced to copy_len as well -- assigning a
    # full-length slice into a truncated destination slice raises ValueError
    # whenever nbytes < len(source_buffer).
    buffer_ptr[:copy_len] = new_value_ptr[:copy_len]
    return copy_len
def await_condition(description, condition_eval_callable, on_failure=lambda: True, timeout=10, poll_s=0.1):
    """
    Wait until *condition_eval_callable* returns True, polling every *poll_s*
    seconds, or raise AssertionError once *timeout* seconds have elapsed.

    :param description: Human-readable description of the awaited condition.
    :type description: str
    :param condition_eval_callable: Zero-argument callable returning True when done.
    :type condition_eval_callable: -> bool
    :param on_failure: Zero-argument callable invoked just before timing out.
        Default: NOOP
    :type on_failure: -> NoneType
    :param timeout: Seconds to wait before giving up. Default: 10.
    :type timeout: float | int
    :param poll_s: Seconds between polls. Default: 0.1 (100 ms).
    :type poll_s: float | int
    :return: None.
    :raises: AssertionError
    """
    deadline = time.time() + timeout
    while not condition_eval_callable():
        if time.time() >= deadline:
            on_failure()
            raise AssertionError(
                "Awaiting condition {0} has timed out after {1} seconds".format(description, timeout)
            )
        time.sleep(poll_s)
|
24,977 | 4a666b8c077d6f9cc01f97182c047260a1dd20ab | #!/usr/bin/python
from random import random
import math
import ephem
# import field
# to be implemented once the field class has been created
class ssobj(ephem.EllipticalBody):
'Class for all Survey Simulator objects.'
def __init__(self, a, e, inc, capom, argperi, H=5, M=0.0):
# ephem.EllipticalBody.__init__()
self.a = a
self.e = e
self.inc = inc # degrees
self.Om = capom # degrees
self.om = argperi # degrees
self.H = H
self.M = M
self._G = -0.12 # Hard coded by JM: "c Hard coded slope for magnitude ! Bad boy !"
#----------- a
@property
def a(self):
"""I'm the a property."""
return self._a
@a.setter
def a(self, value):
if not 0.0 <= value <= 10E6:
raise ValueError('Bad a value. Ensure 0.0 < a < 10E6')
self._a = value
#----------- e
@property
def e(self):
"""I'm the e property."""
return self._e
@e.setter
def e(self, value):
if not 0.0 <= value <= 1.0:
raise ValueError('Bad e value. e must be between 0 and 1')
self._e = float(value)
#----------- inc
@property
def inc(self):
"""I'm the inc property."""
return self._inc
@inc.setter
def inc(self, value):
if not 0.0 <= value <= 180.0:
raise ValueError('Bad inclination value. Ensure 0.0 < inclination < 90 degrees')
self._inc = value
#----------- Om
@property
def Om(self):
"""I'm the Om property."""
return self._Om
@Om.setter
def Om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad Om value. Om must be between 0 and 360 degrees')
self._Om = float(value)
#----------- om
@property
def om(self):
"""I'm the om property."""
return self._om
@om.setter
def om(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad om value. om must be between 0 and 360 degrees')
self._om = float(value)
#----------- H
@property
def H(self):
"""I'm the H property."""
return self._H
@H.setter
def H(self, value):
self._H = float(value)
#----------- epoch
@property
def epoch(self):
"""I'm the epoch property."""
return self._epoch
@epoch.setter
def epoch(self, value):
self._epoch = float(value)
#----------- epoch_M
@property
def epoch_M(self):
"""I'm the epoch_M property."""
return self._epoch_M
@epoch_M.setter
def epoch_M(self, value):
self._epoch_M = float(value)
#----------- M
@property
def M(self):
"""I'm the M property."""
return self._M
@M.setter
def M(self, value):
if not 0.0 <= value <= 360.0:
raise ValueError('Bad M value. M must be between 0 and 360 degrees')
self._M = float(value)
#------------------------------- Object Status --------------------------------
def __str__(self):
"""Print the current orbital parameters a, e, inc, argperi, capom, H"""
status = ("\na: %.2f \n" % self.a +
"e: %.2f \n" % self.e +
"inc: %.2f deg \n" % (self.inc * 180/math.pi) +
"om: %.2f deg \n" % (self.om * 180/math.pi) +
"Om: %.2f deg \n" % (self.Om * 180/math.pi) +
"H: %.2f \n" % self.H
)
return status
#-------------------------- Size Distribution ---------------------------------
def drawH(self, alpha, hmax, alpha_faint=None, contrast=1, hbreak=None,
hmin=1):
"""Compute and assign and H-magnitude from a so-called singlE
power-law, knee, or divot H-magnitude distribution.
When provided a slope alpha and a faint-side maximum H-magnitude
(hmax), a H-magnitude is drawn randomly from the distribution
dN/dH propto 10**(alpha H)
in the range hmin = 1 to hmax. Specify hmin to change the bright-end.
Specifying an hbreak and alpha_faint will draw from a knee distribution
Specifying an hbreak, alpha_faint and contrast will draw from a divot
distrubtion as in Shankman et al. 2013
e.g.
---Single Power Law---
object.drawH(0.8,13)
will draw an H-magnitude from the appropriate distribution such that
H [1,13]
object.drawH(0.8,13,hmin=5)
will draw an H-magnitude such that H [5,13]
---Knee---
To draw from a knee distribution specify hbreak and alpha_faint
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5)
This will draw an H-magnitude from a distrubtion that breaks at H=9
from a slope of 0.8 to a slope of 0.5. hmin can also be specified here.
---Divot---
To draw from a divot (see Shankman et al 2013), specify hbreak,
alpha_faint, and the contrast value. Contrasts should be > 1.
hmin can also be specified.
object.drawH(0.8, 13, hbreak=9, alpha_faint = 0.5, contrast = 23)
"""
# Avoid singularity for alpha = 0
alpha = 0.0000000001 if alpha == 0 else alpha
# Set alpha_faint to alpha for the case of a single power-law
alpha_faint = alpha if alpha_faint is None else alpha_faint
# Avoid singularity for alpha_faint = 0
alpha_faint = 0.0000000001 if alpha_faint == 0 else alpha_faint
# Set hbreak to be the maximum H for the case of a single power-law
hbreak = hmax if hbreak is None else hbreak
# ckc is the fraction of objects big (H<Hbreak) of the break
# (with contrast cont >= 1 as in Shankman et al. 2013)
ckc = (1.0 + 1.0 / contrast * alpha / alpha_faint *
(10**(alpha_faint*(hmax - hbreak)) - 1.0))**(-1.0)
rv = random()
if (rv < ckc):
rv = random()
hbright = 10**(alpha*hmin)
hfaint = 10**(alpha*hbreak)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha
else:
rv = random()
hbright = 10**(alpha_faint*hbreak)
hfaint = 10**(alpha_faint*hmax)
self.H = math.log10(rv*(hfaint - hbright) + hbright) / alpha_faint
#----------------- Fuzzing Variables a,e,inc, argperi, capom ------------------
def fuzz(self, variable, fz, type=None):
"""Perturb (fuzz) semimajor axis randomly by up to +- percent specified
Input is treated as percentage if type is not specified as 'abs'.
If type = 'abs', a will be changed randomly by +- amount specified.
The first argument is a string containing the variable to be fuzzed.
The appropriate options are 'a', 'e', 'inc', 'Om', 'om'
e.g.
# KBO(a, e, inc, argperi, capom)
object = ssobj(75, 0.5, 12, 45, 60)
object.fuzz('a', 0.1)
this will take a and randomly perturb it by +- 10%
object.fuzz('a', 10)
produces the same result
---
Conversely,
object.fuzz('a', 0.1, type='abs')
pertubs a by +- 0.1 AU, and
object.fuzz('a', 10, type='abs')
perturbs a by +- 10 AU
"""
# Check to see if the attribute exists, if so get the value
if not hasattr(self, variable):
raise ValueError("You tried to fuzz a parameter that does not exit")
var = getattr(self, variable)
# if variable is an angle, treat it properly as
# float(ephem.EllipticalBody().inc) gives the angle in radians
if variable in ['inc', 'om', 'Om']:
var = float(var)*180.0/math.pi
# set fuzzer to percent
fz = fz/100.0 if (fz > 1.0 and type is None) else fz
var = (var*(1.0 + fz*(2.0*random()-1.0)) if type is None else
(var + (2.0*random()-1.0)*fz))
setattr(self, variable, var)
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*- Detect *-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
#-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*-*
class detect(object):
    """Detection driver: class-level survey/model configuration plus
    per-candidate detection (the detection logic itself is still TODO)."""
    fuzzedvars = []

    @classmethod
    def load_survey(cls, path):
        """Load survey pointings from *path* (pointing.list dir) -- TODO implement."""
        # Empty dictionary to contain all of the field objects in the class
        cls.fields = {}
        # path to pointing.list directory
        # create field objects for every pointing which are shared by the class

    @classmethod
    def hdraw(cls, *args):
        """Placeholder for H-magnitude drawing configuration."""
        pass

    @classmethod
    def fuzz_objects(cls, *args):
        """Record which orbital elements to fuzz.

        Each argument is a tuple ('element', amount) or ('element', amount, 'abs').

        :raises ValueError: on an unknown element name, a third entry other
            than 'abs', or a tuple longer than three entries.
        """
        options = ['a', 'e', 'inc', 'Om', 'om']
        for item in args:
            if item[0] not in options:
                raise ValueError('Your given input of ' + item[0] + ' is not of a fuzzable variable')
            if len(item) > 3:
                raise ValueError("Specify the variable to be fuzzed and the amount e.g. ('inc', 1, 'abs')")
            # Bug fix: the original `if not item[2] is 'abs'` crashed with
            # IndexError on 2-tuples, compared strings with `is`, and the
            # raise/ValueError names were misspelled (SyntaxError).
            if len(item) == 3 and item[2] != 'abs':
                raise ValueError("The third argument for fuzz_objects MUST be 'abs' if specified")
        cls.fuzzedvars = args

    @classmethod
    def load_file(cls, filepath, *args):
        """Register an input file and the column layout of its elements.

        *args* are tuples of ('element', column_index), counting from 0,
        e.g. detect.load_file(path, ('inc', 1), ('a', 2)).
        """
        cls.filepath = filepath
        options = ['a', 'e', 'inc', 'Om', 'om', 'H', 'M', 'M_epoch']
        for item in args:
            if item[0] not in options:
                raise ValueError('Your given input of ' + item[0] + ' is not of the appropriate read-in type')
            if len(item) > 2:
                # Bug fix: ValueError was misspelled 'ValuError' here.
                raise ValueError("Specify the variable and column of the variable in the form ('a', 0), counting from 0")
        cls.elementorder = args

    @classmethod
    def numdetections(cls, numdetections):
        """Set the requested number of detections.

        Bug fix: the parameter was named `class` (a SyntaxError); renamed to
        the conventional `cls`.
        NOTE(review): this rebinds detect.numdetections from a method to an
        int, so the method cannot be called twice -- confirm intended.
        """
        cls.numdetections = numdetections

    @classmethod
    def output(cls, outputfile):
        """Set the output file path for detection results."""
        cls.outputfile = outputfile

    def __init__(self, external_candidate):
        """Take in the candidate object and run the detection -- TODO implement."""
        # Probably also write out for successful detections
        pass
|
24,978 | ec0bd56076bf00a2c647f610cac095e787f4a8b1 | from cassiopeia import baseriotapi
from .. import int_test_handler
def test_all():
    """Run every dto/team integration test in this module."""
    print("dto/team tests...")
    test_teams_by_summoner_id()
    test_teams_by_id()


def test_teams_by_summoner_id():
    """Fetch teams by summoner id through baseriotapi and validate the result."""
    int_test_handler.test_result(baseriotapi.get_teams_by_summoner_id(int_test_handler.summoner_id))


def test_teams_by_id():
    """Fetch teams by team id through baseriotapi and validate the result."""
    int_test_handler.test_result(baseriotapi.get_teams_by_id(int_test_handler.team_id))
|
24,979 | 488326288be5e09b52271136e5e37d5f60063d3e | from .base import BaseTask
class TestTask(BaseTask):
    """Minimal task used for testing: it simply doubles its input."""

    def __init__(self, appID: int):
        # Register this task under the fixed application name 'TestApp'.
        super().__init__(appID, 'TestApp')

    def exec(self, inputData):
        """Return *inputData* multiplied by two."""
        doubled = inputData * 2
        return doubled
|
24,980 | e8209d90cc0941f5fcfe443fc557418f0cb1f382 | #!/usr/bin/python
# -*- coding:utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
import statsmodels.api as sm
import sys
import os
# NOTE(review): both module-level names below appear unused — getData() and
# main() shadow them with locals; presumably leftovers from an earlier
# version. Kept as-is since only part of the file is visible here.
filename = "result/round5/cache/benchmark_read_info_172.16.1.92.txt"
data = []
def getData(path):
    """Collect 'Completed ...' sample values from every .log file in *path*.

    For each line starting with "Completed", the last whitespace-separated
    token has its 4-character unit suffix stripped and is parsed as a float.
    Unparseable lines are reported and skipped.

    Fixed: 'except ValueError, e' (removed in Python 3) is now
    'except ValueError as e', which is also valid on Python 2.6+.
    """
    data = []
    for f in os.listdir(path):
        filename = os.path.join(path, f)
        if os.path.isfile(filename) and f.endswith(".log"):
            with open(filename, 'r') as fp:
                lines = fp.read().split('\n')
                for line in lines:
                    try:
                        if line.startswith("Completed"):
                            data.append(float(line.split(' ')[-1][:-4]))
                    except ValueError as e:
                        print("error append", e, line)
    return data
def main():
    """Plot a ~500-point downsample of each benchmark directory on argv.

    NOTE: Python 2 code — uses print statements and relies on integer
    division in 'dataLen/500'.
    """
    for path in sys.argv[1:]:
        data = getData(path)
        dataLen = len(data)
        print dataLen
        # Truncate to a multiple of 500, then keep every (dataLen/500)-th
        # sample so each curve has roughly 500 points.
        data = data[:dataLen/500 * 500]
        newData = data[::dataLen/500]
        newDataLen = len(newData)
        print newDataLen
        x = [x+1 for x in range(newDataLen)]
        #y = ecdf(x)
        # Label each curve with the last path component.
        plt.plot(x,newData,label=path.split('/')[-1])
    plt.legend(loc="best")
    #plt.show()
    plt.savefig('bw.eps', format='eps', dpi=1000)
# Script entry point: each command-line argument is a benchmark directory.
if __name__ == '__main__':
    main()
|
24,981 | 4f57284fe8d2e22cc576d289d95a99014cef0af3 | '''
实现 int sqrt(int x) 函数。
计算并返回 x 的平方根,其中 x 是非负整数。
由于返回类型是整数,结果只保留整数的部分,小数部分将被舍去。
示例 1:
输入: 4
输出: 2
示例 2:
输入: 8
输出: 2
说明: 8 的平方根是 2.82842...,
由于返回类型是整数,小数部分将被舍去。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/sqrtx
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
class Solution:
    def mySqrt(self, x: int) -> int:
        """Return floor(sqrt(x)) for a non-negative integer x.

        Fixed: the previous body called an undefined name `sqrt`
        (NameError at runtime). Binary search avoids float precision
        issues for large x and needs no imports.
        """
        if x < 2:
            return x
        lo, hi = 1, x // 2
        while lo <= hi:
            mid = (lo + hi) // 2
            if mid * mid <= x:
                lo = mid + 1
            else:
                hi = mid - 1
        # hi is the largest value whose square does not exceed x.
        return hi
|
24,982 | 10c81ae9de93148da212a76768d83270f6c201dd | # Task3 Q2
languages = ('swift', 'python' , "java" )
Word = input('\nPlease Guess the Word!\n').lower()
while True:
if Word in languages:
print('Good! The Word Exist.')
exit()
else:
print('Wrong! Please Try Again.')
Word = input('\nPlease Guess the Word!\n').lower() |
24,983 | 10e347d76b8dc80c560b73e21c489d42bf32b380 | import sys
def WHA(inStr):
    """Weak hash: fold each character into a 30-bit-masked running sum.

    Every byte is XOR-spread into a 32-bit word, masked to 30 bits, and
    added to the (masked) accumulator.
    """
    mask = 0x3FFFFFFF

    def spread(b):
        # XOR the byte against four constants and pack into one 32-bit word.
        return ((b ^ 0xCC) << 24) | ((b ^ 0x33) << 16) | ((b ^ 0xAA) << 8) | (b ^ 0x55)

    digest = 0
    for b in map(ord, inStr):
        digest = (digest & mask) + (spread(b) & mask)
    return digest
# Usage: script.py <input_file> <output_file>
# Hashes the (stripped) contents of the input file and writes the hex digest.
if __name__ == '__main__':
    input_file = sys.argv[1]
    output_file = sys.argv[2]
    with open(input_file) as f1:
        inStr = f1.read().strip()
    encode = 0
    encode = WHA(inStr)
    encode = hex(encode)
    # NOTE(review): output file is never explicitly closed/flushed —
    # relies on interpreter exit; consider a 'with' block.
    f = open(output_file, 'w')
    f.write(encode)
    #0x2370e2c5
24,984 | cb47b5693fcbbb163eb75532b5eb18fc4ab9c1eb | def sentence(): # Sentence
result = "<sentence>"
# result = result + subject(articlex="the", adjectivex="big", nounx="dog")
result = result + subject(pronounx='she')
result = result + predicate(verbx = 'bit',pronounx='me',adverbx=('quickly'))
result = result + "</sentence>"
return result
def subject(articlex='', adjectivex='', nounx='', pronounx=''):
    """Build a <subject> element from either a noun phrase or a pronoun."""
    inner = ""
    if nounx != '':
        inner += nounPhrase(articlex=articlex, adjectivex=adjectivex, nounx=nounx)
    if pronounx != '':
        inner += pronoun(pronounx=pronounx)
    return "<subject>" + inner + "</subject>"
def predicate(adverbx='', verbx='', nounx='', pronounx='', articlex='', adjectivex=''):
    """Build a <predicate> element from a verb phrase plus an optional object.

    Fixed: the noun-object branch previously forwarded empty strings to
    objectPhrase() instead of the caller's article/adjective/noun values,
    so a noun object was always rendered empty.
    """
    result = "<predicate>"
    if verbx != '':
        result = result + verbPhrase(adverbx=adverbx, verbx=verbx)
    if nounx != '':
        result = result + objectPhrase(nounx=nounx, articlex=articlex, adjectivex=adjectivex)
    if pronounx != '':
        result = result + objectPhrase(pronounx=pronounx)
    result = result + "</predicate>"
    return result
def objectPhrase(articlex="", adjectivex="", nounx="", pronounx=""):
    """Build an <objectPhrase> from either a noun phrase or a pronoun."""
    inner = ""
    if nounx != '':
        inner += nounPhrase(articlex, adjectivex, nounx)
    if pronounx != '':
        inner += pronoun(pronounx)
    return "<objectPhrase>" + inner + "</objectPhrase>"
def nounPhrase(articlex="", adjectivex="", nounx=""):
    """Build a <nounPhrase> from optional article, adjective and noun, in order."""
    inner = ""
    for value, render in ((articlex, article), (adjectivex, adjective), (nounx, noun)):
        if value != '':
            inner += render(value)
    return "<nounPhrase>" + inner + "</nounPhrase>"
def noun(text, number="", gender=""):
    """Wrap *text* in a <noun> element with optional number/gender attributes."""
    attrs = ""
    if number != '':
        attrs += " number='" + number + "'"
    if gender != '':
        attrs += " gender='" + gender + "'"
    return "<noun" + attrs + ">" + text + "</noun>"
def pronoun(pronounx="", numberx="", genderx=""):
    """Wrap *pronounx* in a <pronoun> element with optional number/gender attributes."""
    attrs = ""
    if numberx != '':
        attrs += " number='" + numberx + "'"
    if genderx != '':
        attrs += " gender='" + genderx + "'"
    return "<pronoun" + attrs + ">" + pronounx + "</pronoun>"
def article(text, number="", gender=""):
    """Wrap *text* in an <article> element with optional number/gender attributes."""
    attrs = ""
    if number != '':
        attrs += " number='" + number + "'"
    if gender != '':
        attrs += " gender='" + gender + "'"
    return "<article" + attrs + ">" + text + "</article>"
def adjective(text, number="", gender=""):
    """Wrap *text* in an <adjective> element with optional number/gender attributes."""
    attrs = ""
    if number != '':
        attrs += " number='" + number + "'"
    if gender != '':
        attrs += " gender='" + gender + "'"
    return "<adjective" + attrs + ">" + text + "</adjective>"
def verbPhrase(adverbx="", modalx="", verbx=""):
    """Build a <verbPhrase> from optional adverb, modal and verb, in order."""
    inner = ""
    for value, render in ((adverbx, adverb), (modalx, modal), (verbx, verb)):
        if value != '':
            inner += render(value)
    return "<verbPhrase>" + inner + "</verbPhrase>"
def adverb(text, adverbx=""):
    """Wrap *text* in <adverb>; a second adverb is appended comma-separated."""
    tag = "<adverb>" + text + "</adverb>"
    if adverbx != "":
        return tag + "," + adverb(adverbx)
    return tag
def modal(text, modalx=""):
    """Wrap *text* in <modal>; a second modal is appended directly (no separator)."""
    tag = "<modal>" + text + "</modal>"
    return tag + modal(modalx) if modalx != '' else tag
def verb(text, tensex="", moodx="", numberx="", genderx=""):
    """Wrap *text* in a <verb> element with optional tense/mood/number/gender attributes.

    Fixed: attributes were emitted without the leading space, quoting or
    '=' formatting used everywhere else (producing e.g. '<verbtense=past>');
    they are now rendered like noun()/article()/adjective():
    "<verb tense='past'>bit</verb>".
    """
    result = "<verb"
    if tensex != '':
        result = result + " tense='" + tensex + "'"
    if moodx != '':
        result = result + " mood='" + moodx + "'"
    if numberx != '':
        result = result + " number='" + numberx + "'"
    if genderx != '':
        result = result + " gender='" + genderx + "'"
    result = result + ">" + text + "</verb>"
    return result
def mySentenceSearch(root):  # mySentenceSearch
    """Print the text of subject, verb and object nodes found under *root*.

    *root* is a parsed sentence tree (xml.etree Element). Each grammatical
    slot is searched in a fixed order; the first match of every pattern
    that exists is printed.

    Fixed: replaced five copy-pasted find/print stanzas with a loop, and
    '!= None' with the idiomatic 'is not None' (Element truthiness is
    based on child count, so identity comparison is the safe test).
    """
    patterns = (
        './/subject//noun',
        './/subject//pronoun',
        './/predicate//verb',
        './/objectPhrase//noun',
        './/objectPhrase//pronoun',
    )
    for pattern in patterns:
        node = root.find(pattern)
        if node is not None:
            print(node.text)
    return
|
24,985 | 6d3fc4f797bc38bd6773c604ba040b0c940b6e20 | DataPath = "some data path"
def load(path='default_path'):
    """Pretend to read data, reporting which path was used."""
    print("Read data from {}".format(path))
def dump(path='default_path'):
    """Pretend to write data, reporting which path was used."""
    print("Dump data to {}".format(path))
24,986 | d99630b35514b0e4b62257e25dd4c37aa37e119f | #!/usr/bin/python
#-*- encoding: utf-8 -*-
"""
9. Stellenbezogene Lueckenliste fuellen
9.0. Vorhandene Tabelle leeren
9.1. Alle Passages auflisten
9.2. Alle Handschriften der Systematischen Lueckenliste auflisten
9.3. Schleife ueber alle Handschriften
9.4. Schleife ueber alle Passages
9.5. Eintrag in ActsNNattLac, wenn die Handschrift an genannter Stelle
in der Systematischen Lueckenliste verzeichnet ist
"""
__author__="volker.krueger@uni-muenster.de"
def enter2LocalLacList(cursor, hs, db, lactable, anfadr, endadr):
    """Insert one lacuna record for manuscript *hs* covering anfadr..endadr.

    Decodes the begin/end addresses into book/chapter/verse/word components
    and writes a single row into db.lactable.
    NOTE(review): SQL is built via %-interpolation — acceptable only while
    hs/db/lactable come from the project's own tables, never user input.
    """
    import Address
    b, bc, bv, bw, ec, ev, ew = Address.decodeAdr(anfadr, endadr)
    hsnr = Address.hs2hsnr(hs)
    cmd = "insert into %s.%s " % (db, lactable)
    cmd += "(buch, kapanf, versanf, wortanf, kapend, versend, wortend, "
    cmd += "anfadr, endadr, hs, hsnr, anfalt, endalt) "
    cmd += "values (%d, %d, %d, %d, %d, %d, %d, " % (b, bc, bv, bw, ec, ev, ew)
    cmd += "%d, %d, '%s', %d, %d, %d) " % (anfadr, endadr, hs, hsnr, anfadr, endadr)
    cursor.execute(cmd)
def main_9(db1, db2, tab1, tab2, mode="remote"):
    """Fill the passage-based lacunae table (steps 9.0-9.5 of the pipeline).

    Truncates db1.<tab1>Lac, then for every manuscript listed in db2.tab2
    and every passage of db1.tab1, records a row when the manuscript is
    marked lacunose at that passage in the systematic lacunae list.
    """
    import db_access3
    dba = db_access3.DBA(mode)
    cursor = dba.cursor()
    sourcetable = tab1
    lactable = sourcetable + "Lac"
    # 0. Truncate table
    cmd = "truncate %s.%s " % (db1, lactable)
    cursor.execute(cmd)
    dba.commit()
    # 1.1. All passages of the source table.
    passages, passcount = dba.getPassages(db1, sourcetable)
    # 1.2. All manuscripts of the systematic lacunae list, ordered by number.
    cmd = "select distinct hs from %s.%s " % (db2, tab2)
    cmd += "order by hsnr "
    cursor.execute(cmd)
    mss = cursor.fetchall()
    # 1.3. For each manuscript, test every passage against the lacunae list.
    for ms in mss:
        hs = ms[0]
        for passage in passages:
            anfadr = passage[0]
            endadr = passage[1]
            # 1.5. Count lacunae entries fully covering this passage.
            cmd = "select count(id) from %s.%s " % (db2, tab2)
            cmd += "where anfadr <= %d and endadr >= %d " % (anfadr, endadr)
            cmd += "and hs = '%s' " % (hs)
            cursor.execute(cmd)
            result = cursor.fetchone()
            rescount = result[0]
            if rescount > 0:
                enter2LocalLacList(cursor, hs, db1, lactable, anfadr, endadr)
    dba.commit() # it's an InnoDB table
    cursor.close()
    dba.close()
# Command-line entry point (Python 2 — uses print statements).
# Required options: -d database, -t attestation table, -e lacuna database;
# -l names the systematic lacunae table, -m selects local/remote access.
if __name__ == "__main__":
    from optparse import OptionParser
    import sys, time
    print time.ctime()
    parser = OptionParser()
    parser.add_option("-d", "--database", dest="database", help="Giving database")
    parser.add_option("-m", "--mode", dest="mode", help="choose 'local' or 'remote' database access, 'remote' is default")
    parser.add_option("-t", "--table", dest="table", help="Giving table name attestations")
    parser.add_option("-e", "--ref_db", dest="ref_db", help="Database of lacuna table")
    parser.add_option("-l", "--lactable", dest="lactable", help="Systematic lacunae table")
    (opts, args) = parser.parse_args()
    parser.destroy()
    if opts.database == None or opts.table == None or opts.ref_db == None:
        print "Error: At least one parameter necessary is missing!"
        print "See python %s -h" % sys.argv[0]
        sys.exit(1)
    # NOTE(review): opts.mode is parsed but never passed to main_9 — confirm.
    main_9(opts.database, opts.ref_db, opts.table, opts.lactable)
    print "%s finished at %s" % (sys.argv[0], time.ctime())
24,987 | 73d0a988522714278d39b90d58dd284037c76679 | #!/usr/bin/env python
# encoding: utf-8
import sys
sys.path.insert(0, "/home/daipeng/Desktop/program/google_appengine")
import unittest
import time
from selenium import webdriver
from google.appengine.api import memcache
from google.appengine.ext import ndb
from google.appengine.ext import testbed
from models import Tag, Category, Blog
def testdb_init():
    """Seed the test datastore with fixed tags, categories and four blogs."""
    # db clear
    tag_emacs = Tag(title="emacs")
    tag_python = Tag(title="python")
    tag_emacs.put()
    tag_python.put()

    cat_program = Category(title='program')
    cat_edit = Category(title='edit')
    cat_program.put()
    cat_edit.put()

    # Blog 1: no tags, no category.
    blog = Blog(title='first blog')
    blog.context = "this is my first blog, hello world"
    blog.put()

    # Blog 2: both tags, no category.
    blog = Blog(title="second blog")
    blog.context = "this is my second blog, hello python"
    blog.tags = [tag_emacs.key, tag_python.key]
    blog.put()

    # Blog 3: emacs tag, 'edit' category.
    blog = Blog(title="third blog")
    blog.context = "this is my third blog, hello python"
    blog.tags = [tag_emacs.key]
    blog.category = cat_edit.key
    blog.put()

    # Blog 4: python tag, 'program' category.
    blog = Blog(title="fourth blog")
    blog.context = "this is my fourth blog, hello python"
    blog.tags = [tag_python.key]
    blog.category = cat_program.key
    blog.put()
class BaseTestCase(unittest.TestCase):
    """Shared fixture: a Firefox WebDriver plus a GAE testbed with
    datastore and memcache stubs, created fresh for every test."""

    def setUp(self):
        # the selenium firefox
        self.ff = webdriver.Firefox()
        self.base_url = "localhost:8080"
        self.ff.implicitly_wait(3)
        # the testbed setup
        self.testbed = testbed.Testbed()
        self.testbed.activate()
        self.testbed.init_datastore_v3_stub()
        self.testbed.init_memcache_stub()

    def tearDown(self):
        # Release the browser and deactivate the testbed after each test.
        self.ff.quit()
        self.testbed.deactivate()
class BlogIndexTestCase(BaseTestCase):
    """Browser test for the blog index page."""

    def test_blog_index_page(self):
        """Open the index page after seeding test data.

        NOTE(review): no assertions — the fixed 10 s sleep appears to keep
        the browser open for manual inspection only; confirm intended.
        """
        # TODO: data init
        testdb_init()
        url = self.base_url
        self.ff.get(url)
        time.sleep(10)
# Run every TestCase in this module when executed directly.
if __name__ == '__main__':
    unittest.main()
|
24,988 | bc6d8435910d267684a842c7087a97931d36167a | from imageai.Detection import VideoObjectDetection
import os
import json
import cv2
import tensorflow as tf
# gpus = tf.config.experimental.list_physical_devices('GPU')
# tf.config.experimental.set_memory_growth(gpus[0], True)
tf.debugging.set_log_device_placement(True)
config = tf.compat.v1.ConfigProto()
# Grab GPU memory on demand instead of reserving it all at startup.
config.gpu_options.allow_growth = True
# config.gpu_options.per_process_gpu_memory_fraction = 0.9
session = tf.compat.v1.Session(config=config)

# Run-control flags:
FORCE_REBUILD = True        # ignore any cached VideoSearcher .dat file
SAVE_ALL_AND_EXIT = True    # render one clip per detected object, then quit
SHOW_VID = False
SAVE_VID = True
SAVE_ORIGINAL_VID = True

# 0 - UT egocentric video with blurred faces, first 350 frames
# 1 - music room video
# 2 - music room video, first second
INVID_ID = 0

# Pick the first data directory that exists on this machine.
data_dirs = ["/home/wcroughan/glasses_data", "/path/to/your/data/folder"]
data_dir = ""
for dd in data_dirs:
    if os.path.exists(dd):
        data_dir = dd
        break
if data_dir == "":
    print("Couldn't find any of the folders listed in data_dirs. Add the folder on your machine to the list.")
    exit()
if INVID_ID == 0:
    input_video_path = os.path.join(data_dir, "P01.mp4")
elif INVID_ID in [1, 2]:
    input_video_path = os.path.join(data_dir, "music_15fps_480.mp4")
else:
    print("unknown input video index")
    exit()
output_video_path = os.path.join(data_dir, "outvid.avi")
output_original_video_path = os.path.join(data_dir, "outvid_original.avi")
# Cache file for serialized detection results, keyed by input video id.
saveFile = os.path.join(data_dir, "VideoSearcherObject{}.dat".format(INVID_ID))
class VideoSearcher:
    """Run ImageAI object detection over a video once, cache the per-frame
    results, and answer "which frames contain object X" queries or cut a
    highlight reel for a given object."""

    def __init__(self):
        self.detector = VideoObjectDetection()
        # self.detector.setModelTypeAsYOLOv3()
        # self.detector.setModelPath(os.path.join(data_dir, "yolo.h5"))
        # Tiny YOLOv3: much faster than full YOLOv3 at some accuracy cost.
        self.detector.setModelTypeAsTinyYOLOv3()
        self.detector.setModelPath(os.path.join(data_dir, "yolo-tiny.h5"))
        self.detector.loadModel()
        self.cobs = self.detector.CustomObjects(person=True)
        self.videoAnalyzed = False          # True once a video has been processed
        self.objectFrames = dict()          # object name -> list of frame indices
        self.videoFile = ""                 # path of the analyzed video
        self.output_arrays = []             # per-frame detection dicts
        self.count_arrays = []              # per-frame object-count dicts
        self.average_output_count = dict()

    def VideoCompleteFunction(self, output_arrays, count_arrays, average_output_count):
        """ImageAI callback fired once the whole video has been processed.

        output_arrays[i][j]: dict for the jth object detected in frame i
            (keys: name, percentage_probability, box_points).
        count_arrays[i]: dict of object name -> count for frame i.
        average_output_count: dict of object name -> per-video count summary.
        """
        self.output_arrays = output_arrays
        self.count_arrays = count_arrays
        self.average_output_count = average_output_count
        # Invert per-frame counts into object -> list of frames containing it.
        all_obs_found = average_output_count.keys()
        for ob in all_obs_found:
            self.objectFrames[ob] = [i for i in range(
                len(count_arrays)) if ob in count_arrays[i].keys()]
        print(self.objectFrames)
        self.videoAnalyzed = True

    def analyzeVideo(self, filename, dur=None):
        """Detect objects in *filename*, optionally only the first *dur* seconds."""
        if self.videoAnalyzed:
            print("Already analyzed a video")
            return
        # Probe the source fps so the detection output keeps original timing.
        vid = cv2.VideoCapture(filename)
        fps = vid.get(cv2.CAP_PROP_FPS)
        vid.release()
        self.videoFile = filename
        self.detector.detectObjectsFromVideo(input_file_path=filename, output_file_path=output_video_path, log_progress=True,
                                             frames_per_second=fps, video_complete_function=self.VideoCompleteFunction, detection_timeout=dur)

    def searchForObject(self, objectName):
        """Return the frame indices containing *objectName* (KeyError if never seen)."""
        return self.objectFrames[objectName]

    def makeVideoForObject(self, objectName, filename, prepend_time=0.5, append_time=0.5, drawBox=True, fpsScale=1.0):
        """Write a highlight video of every appearance of *objectName*.

        Each detection frame is padded by prepend_time/append_time seconds of
        context, overlapping clips are merged, and a white title card
        announces each clip's frame range. Returns -1 on a read failure,
        None on success.
        """
        framesWithObject = self.objectFrames[objectName]
        vid = cv2.VideoCapture(self.videoFile)
        fps = vid.get(cv2.CAP_PROP_FPS)
        w = int(vid.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(vid.get(cv2.CAP_PROP_FRAME_HEIGHT))
        writer = cv2.VideoWriter(filename, cv2.VideoWriter_fourcc(
            'M', 'J', 'P', 'G'), fps * fpsScale, (w, h))
        invid_frames = int(vid.get(cv2.CAP_PROP_FRAME_COUNT))
        prepend_frames = int(fps * prepend_time)
        append_frames = int(fps * append_time)
        # Build merged [start, end) clip intervals around detection frames.
        all_starts = [max(0, framesWithObject[0]-prepend_frames)]
        all_ends = [min(invid_frames, framesWithObject[0]+append_frames)]
        for f in framesWithObject[1:]:
            f1 = max(0, f - prepend_frames)
            f2 = min(invid_frames, f+append_frames)
            if f1 < all_ends[-1]:
                all_ends[-1] = f2       # overlaps previous clip: extend it
            else:
                all_starts.append(f1)
                all_ends.append(f2)
        # Grab one frame up front to reuse as the title-card canvas.
        ret, transitionFrame = vid.read()
        if not ret:
            print("Problem getting first frame from video {}".format(self.videoFile))
            return -1
        for st, en in zip(all_starts, all_ends):
            # Half a second (scaled) of title card announcing the clip range.
            transitionFrame[:, :, :] = 255
            cv2.putText(transitionFrame, "Frames {} - {}".format(st, en),
                        (0, h // 2), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0))
            for i in range(int(fps * fpsScale / 2)):
                writer.write(transitionFrame)
            vid.set(cv2.CAP_PROP_POS_FRAMES, st)
            f = st
            while f < en:
                ret, frame = vid.read()
                if not ret:
                    print("Problem getting frame {} from video {}".format(f, self.videoFile))
                    return -1
                if drawBox:
                    # Draw every recorded box for this object on the frame.
                    fdicts = self.output_arrays[f]
                    boxes = [ob['box_points'] for ob in fdicts if ob['name'] == objectName]
                    for b in boxes:
                        cv2.rectangle(frame, (b[0], b[1]),
                                      (b[2], b[3]), (255, 255, 255), thickness=2)
                writer.write(frame)
                f += 1
        # Closing "Finished" card.
        transitionFrame[:, :, :] = 255
        cv2.putText(transitionFrame, "Finished",
                    (0, h // 2), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 0))
        for i in range(int(fps * fpsScale / 2)):
            writer.write(transitionFrame)
        vid.release()
        writer.release()
        print("video at {}".format(filename))

    def getObjectList(self):
        """Return the names of all objects detected in the analyzed video."""
        return self.objectFrames.keys()

    def saveToFile(self, filename):
        """Serialize detection results as JSON lines; return 0 on success.

        Fixed: removed unreachable code after the 'return 0' inside the
        'with' block — open() raises on failure, it never falls through to
        the old "Couldn't open file" branch.
        """
        if not self.videoAnalyzed:
            print("Nothing to save")
            return
        with open(filename, 'w') as f:
            f.write(json.dumps(self.objectFrames))
            f.write("\n")
            f.write(json.dumps(self.output_arrays))
            f.write("\n")
            f.write(json.dumps(self.count_arrays))
            f.write("\n")
            f.write(json.dumps(self.average_output_count))
            f.write("\n")
            f.write(self.videoFile)
            f.write("\n")
            return 0

    def loadFromFile(self, filename):
        """Restore previously saved results; 1 if the file is missing, 0 on success.

        Fixed: removed the same unreachable error branch as in saveToFile().
        NOTE(review): videoAnalyzed is not set to True here — analyzeVideo()
        will still re-run after a load; confirm that is intended.
        """
        if not os.path.exists(filename):
            print("File not found: {}".format(filename))
            return 1
        with open(filename, 'r') as f:
            line = f.readline()
            self.objectFrames = json.loads(line[:-1])
            line = f.readline()
            self.output_arrays = json.loads(line[:-1])
            line = f.readline()
            self.count_arrays = json.loads(line[:-1])
            line = f.readline()
            self.average_output_count = json.loads(line[:-1])
            line = f.readline()
            self.videoFile = line[:-1]
            return 0
if __name__ == "__main__":
    vs = VideoSearcher()
    # Load cached detection results unless a rebuild is forced or the load fails.
    remake_model = FORCE_REBUILD
    if not remake_model:
        loadret = vs.loadFromFile(saveFile)
        if loadret:
            print("couldn't load object, remaking file")
            remake_model = True
    if remake_model:
        if INVID_ID == 2:
            # Video variant 2 is only the first half second.
            vs.analyzeVideo(input_video_path, dur=0.5)
        else:
            vs.analyzeVideo(input_video_path)
        vs.saveToFile(saveFile)

    # Batch mode: one highlight clip per detected object, then exit.
    if SAVE_ALL_AND_EXIT:
        allobs = list(vs.getObjectList())
        for ob in allobs:
            print(ob)
            fname = "object_video_{}.avi".format(ob)
            vs.makeVideoForObject(ob, os.path.join(data_dir, fname), fpsScale=1.0)
        session.close()
        exit()

    # Interactive mode: q quits, l lists objects, 'all' renders everything,
    # anything else is treated as an object name to search.
    running = True
    while running:
        ob = input("Which object?")
        if ob == "q":
            break
        elif ob == "l":
            print(vs.getObjectList())
        elif ob == "all":
            allobs = list(vs.getObjectList())
            for ob in allobs:
                print(ob)
                fname = "object_video_{}.avi".format(ob)
                vs.makeVideoForObject(ob, os.path.join(data_dir, fname), fpsScale=1.0)
        else:
            try:
                print(vs.searchForObject(ob))
                makevid = input("Make a video for this object (y/N)?")
                if makevid == "y":
                    fname = "object_video_{}.avi".format(ob)
                    vs.makeVideoForObject(ob, os.path.join(data_dir, fname), fpsScale=1.0)
            except KeyError:
                print("Object {} not found".format(ob))
    session.close()
|
24,989 | 41055a5413231f88dc35d2225751708eb8550faf | from nose.plugins.attrib import attr
import simtk.unit as u
from simtk.openmm import app
import simtk.openmm as mm
import numpy as np
import re
from mdtraj.testing import eq
from unittest import skipIf
from openmoltools import utils, packmol
import os
import openmoltools.openeye
import pandas as pd
import mdtraj as md
from numpy.testing import assert_raises
# Test molecule known to fail charging under strictStereo; the second
# (saner) SMILES deliberately overwrites the first.
smiles_fails_with_strictStereo = "CN1CCN(CC1)CCCOc2cc3c(cc2OC)C(=[NH+]c4cc(c(cc4Cl)Cl)OC)C(=C=[N-])C=[NH+]3" # this is insane; C=C=[N-]?
smiles_fails_with_strictStereo = "CN1CCN(CC1)CCCOc2cc3c(cc2OC)C(=[NH+]c4cc(c(cc4Cl)Cl)OC)C(C#N)C=[NH+]3" # more sane version

# Probe for the OpenEye toolkits: every component must import AND be
# licensed; the failure reason is kept for the skip messages below.
try:
    oechem = utils.import_("openeye.oechem")
    if not oechem.OEChemIsLicensed(): raise(ImportError("Need License for OEChem!"))
    oequacpac = utils.import_("openeye.oequacpac")
    if not oequacpac.OEQuacPacIsLicensed(): raise(ImportError("Need License for oequacpac!"))
    oeiupac = utils.import_("openeye.oeiupac")
    if not oeiupac.OEIUPACIsLicensed(): raise(ImportError("Need License for OEOmega!"))
    oeomega = utils.import_("openeye.oeomega")
    if not oeomega.OEOmegaIsLicensed(): raise(ImportError("Need License for OEOmega!"))
    HAVE_OE = True
    openeye_exception_message = str()
except Exception as e:
    HAVE_OE = False
    openeye_exception_message = str(e)

# Optional ParmEd dependency, needed only by the parmed-marked tests.
try:
    import parmed
    HAVE_PARMED = True
except ImportError:
    HAVE_PARMED = False
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.\n" + openeye_exception_message)
def test_butanol_keepconfs():
    """Charging with keep_confs=1 must yield exactly one conformer."""
    m0 = openmoltools.openeye.iupac_to_oemol("butanol")
    m1 = openmoltools.openeye.get_charges(m0, keep_confs=1)
    eq(m0.NumAtoms(), m1.NumAtoms())
    assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
    assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"


@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.\n" + openeye_exception_message)
def test_butanol_unnormalized():
    """With normalize=False the molecule title must survive charging."""
    m0 = openmoltools.openeye.iupac_to_oemol("butanol")
    m0.SetTitle("MyCustomTitle")
    m1 = openmoltools.openeye.get_charges(m0, normalize=False, keep_confs=1)
    eq(m0.NumAtoms(), m1.NumAtoms())
    assert m1.NumConfs() == 1, "This OEMol was created to have a single conformation."
    assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
    assert m0.GetTitle() == m1.GetTitle(), "The title of the molecule should not be changed by normalization."


@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2():
    """Smoke test: write a molecule out as Tripos mol2."""
    molecule = openmoltools.openeye.iupac_to_oemol("cyclopentane")
    openmoltools.openeye.molecule_to_mol2(molecule, tripos_mol2_filename="testing mol2 output.tripos.mol2")


@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2_standardize():
    """standardize=True must strip custom atom names from the mol2 output."""
    molecule = openmoltools.openeye.iupac_to_oemol("cyclopentane")
    list(molecule.GetAtoms())[0].SetName("MyNameIsAtom")
    openmoltools.openeye.molecule_to_mol2(molecule, tripos_mol2_filename="testing mol2 standardize output.tripos.mol2", standardize=True)
    with open("testing mol2 standardize output.tripos.mol2", "r") as outfile:
        text = outfile.read()
    # This should not find the text we added, to make sure the molecule is standardized.
    assert re.search("MyNameIsAtom", text) is None


@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2_no_standardize():
    """standardize=False must preserve custom atom names in the mol2 output."""
    molecule = openmoltools.openeye.iupac_to_oemol("cyclopentane")
    list(molecule.GetAtoms())[0].SetName("MyNameIsAtom")
    openmoltools.openeye.molecule_to_mol2(molecule, tripos_mol2_filename="testing mol2 nostandardize output.tripos.mol2", standardize=False)
    with open("testing mol2 nostandardize output.tripos.mol2", "r") as outfile:
        text = outfile.read()
    # This should find the text we added, to make sure the molecule is not standardized.
    assert re.search("MyNameIsAtom", text) is not None


@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_output_mol2_multiple_confs():
    """conformer=None must write every conformer as its own mol2 MOLECULE block."""
    molecule = openmoltools.openeye.iupac_to_oemol("butanol")
    multiple_conformers = openmoltools.openeye.generate_conformers(molecule)
    openmoltools.openeye.molecule_to_mol2(multiple_conformers, tripos_mol2_filename="testing mol2 multiple conformers.tripos.mol2", conformer=None)
    with open("testing mol2 multiple conformers.tripos.mol2", "r") as outfile:
        text = outfile.read()
    # This should find more than one conformation
    assert text.count("@<TRIPOS>MOLECULE") > 1
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_butanol():
    """Charges must be identical across conformers and round-trip through mol2."""
    m0 = openmoltools.openeye.iupac_to_oemol("butanol")
    m1 = openmoltools.openeye.get_charges(m0, keep_confs=-1)
    eq(m0.NumAtoms(), m1.NumAtoms())
    assert m1.NumConfs() >= 2, "Butanol should have multiple conformers."
    assert m1.NumAtoms() == 15, "Butanol should have 15 atoms"
    all_data = {}
    for k, molecule in enumerate(m1.GetConfs()):
        names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(molecule)
        all_data[k] = names_to_charges
        eq(sum(names_to_charges.values()), 0.0, decimal=7)  # Net charge should be zero
    # Build a table of charges indexed by conformer number and atom name
    all_data = pd.DataFrame(all_data)
    # The standard deviation along the conformer axis should be zero if all conformers have same charges
    eq(all_data.std(1).values, np.zeros(m1.NumAtoms()), decimal=7)
    with utils.enter_temp_directory():
        # Try saving to disk as mol2
        openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
        # Make sure MDTraj can read the output
        t = md.load("out.mol2")
        # Make sure MDTraj can read the charges / topology info
        atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
        # Finally, make sure MDTraj and OpenEye report the same charges.
        names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
        q = atoms.set_index("name").charge
        q0 = pd.Series(names_to_charges)
        delta = q - q0  # An object containing the charges, with atom names as indices
        eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_benzene():
    """Benzene: single conformer, zero net charge, charges round-trip via mol2."""
    m0 = openmoltools.openeye.iupac_to_oemol("benzene")
    m1 = openmoltools.openeye.get_charges(m0)
    eq(m0.NumAtoms(), m1.NumAtoms())
    print(m1.NumConfs())
    assert m1.NumConfs() == 1, "Benezene should have 1 conformer"
    assert m1.NumAtoms() == 12, "Benezene should have 12 atoms"
    names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
    eq(sum(names_to_charges.values()), 0.0, decimal=7)  # Net charge should be zero
    with utils.enter_temp_directory():
        # Try saving to disk as mol2
        openmoltools.openeye.molecule_to_mol2(m1, "out.mol2")
        # Make sure MDTraj can read the output
        t = md.load("out.mol2")
        # Make sure MDTraj can read the charges / topology info
        atoms, bonds = md.formats.mol2.mol2_to_dataframes("out.mol2")
        # Finally, make sure MDTraj and OpenEye report the same charges.
        names_to_charges, str_repr = openmoltools.openeye.get_names_to_charges(m1)
        q = atoms.set_index("name").charge
        q0 = pd.Series(names_to_charges)
        delta = q - q0  # An object containing the charges, with atom names as indices
        eq(delta.values, np.zeros_like(delta.values), decimal=4)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_link_in_utils():
    """The legacy utils.molecule_to_mol2 alias must still work."""
    m0 = openmoltools.openeye.iupac_to_oemol("benzene")
    m1 = openmoltools.openeye.get_charges(m0)
    with utils.enter_temp_directory():
        # This function was moved from utils to openeye, so check that the old link still works.
        utils.molecule_to_mol2(m1, "out.mol2")


@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_smiles():
    """SMILES and IUPAC routes to butanol must agree on atom count after charging."""
    m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
    charged0 = openmoltools.openeye.get_charges(m0)
    m1 = openmoltools.openeye.iupac_to_oemol("butanol")
    charged1 = openmoltools.openeye.get_charges(m1)
    eq(charged0.NumAtoms(), charged1.NumAtoms())


@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml():
    """Smoke test: convert two charged molecules to an ffxml force field."""
    with utils.enter_temp_directory():
        m0 = openmoltools.openeye.smiles_to_oemol("CCCCO")
        charged0 = openmoltools.openeye.get_charges(m0)
        m1 = openmoltools.openeye.smiles_to_oemol("ClC(Cl)(Cl)Cl")
        charged1 = openmoltools.openeye.get_charges(m1)
        trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml([charged0, charged1])
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_ffxml_simulation():
    """Test converting toluene and benzene smiles to oemol to ffxml to openmm simulation."""
    with utils.enter_temp_directory():
        m0 = openmoltools.openeye.smiles_to_oemol("Cc1ccccc1")
        charged0 = openmoltools.openeye.get_charges(m0)
        m1 = openmoltools.openeye.smiles_to_oemol("c1ccccc1")
        charged1 = openmoltools.openeye.get_charges(m1)
        ligands = [charged0, charged1]
        n_atoms = [15,12]
        trajectories, ffxml = openmoltools.openeye.oemols_to_ffxml(ligands)
        eq(len(trajectories),len(ligands))
        pdb_filename = utils.get_data_filename("chemicals/proteins/1vii.pdb")
        temperature = 300 * u.kelvin
        friction = 0.3 / u.picosecond
        timestep = 0.01 * u.femtosecond
        protein_traj = md.load(pdb_filename)
        protein_traj.center_coordinates()
        protein_top = protein_traj.top.to_openmm()
        protein_xyz = protein_traj.openmm_positions(0)
        for k, ligand in enumerate(ligands):
            ligand_traj = trajectories[k]
            ligand_traj.center_coordinates()
            eq(ligand_traj.n_atoms, n_atoms[k])
            eq(ligand_traj.n_frames, 1)
            #Move the pre-centered ligand sufficiently far away from the protein to avoid a clash.
            min_atom_pair_distance = ((ligand_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + ((protein_traj.xyz[0] ** 2.).sum(1) ** 0.5).max() + 0.3
            ligand_traj.xyz += np.array([1.0, 0.0, 0.0]) * min_atom_pair_distance
            ligand_xyz = ligand_traj.openmm_positions(0)
            ligand_top = ligand_traj.top.to_openmm()
            # Rewind the in-memory ffxml stream before each ForceField parse.
            ffxml.seek(0)
            forcefield = app.ForceField("amber10.xml", ffxml, "tip3p.xml")
            model = app.modeller.Modeller(protein_top, protein_xyz)
            model.add(ligand_top, ligand_xyz)
            model.addSolvent(forcefield, padding=0.4 * u.nanometer)
            system = forcefield.createSystem(model.topology, nonbondedMethod=app.PME, nonbondedCutoff=1.0 * u.nanometers, constraints=app.HAngles)
            integrator = mm.LangevinIntegrator(temperature, friction, timestep)
            simulation = app.Simulation(model.topology, system, integrator)
            simulation.context.setPositions(model.positions)
            print("running")
            # One MD step is enough to prove the system is simulatable.
            simulation.step(1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_fail1():
with assert_raises(RuntimeError):
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=True)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_fail2():
with assert_raises(RuntimeError):
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=True, keep_confs=1)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success1():
with utils.enter_temp_directory():
openmoltools.openeye.smiles_to_antechamber(smiles_fails_with_strictStereo, "test.mol2", "test.frcmod", strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_charge_success2():
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_oeassigncharges_fail():
with assert_raises(RuntimeError):
# Fail test for OEToolkits (2017.2.1) new charging function
m = openmoltools.openeye.smiles_to_oemol(smiles_fails_with_strictStereo)
m = openmoltools.openeye.get_charges(m, strictStereo=True, legacy=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
def test_oeassigncharges_success():
    """The new (legacy=False) charging path succeeds on a simple molecule."""
    # Success test for OEToolkits (2017.2.1) new charging function.
    mol = openmoltools.openeye.iupac_to_oemol("butanol")
    openmoltools.openeye.get_charges(mol, legacy=False)
@skipIf(not HAVE_OE, "Cannot test openeye module without OpenEye tools.")
@skipIf(not HAVE_PARMED, "Cannot test without Parmed Chemistry.")
@skipIf(packmol.PACKMOL_PATH is None, "Skipping testing of packmol conversion because packmol not found.")
@attr("parmed")
def test_binary_mixture_rename():
    """End-to-end binary-mixture box: antechamber -> packmol -> tleap -> OpenMM.

    Parameterizes two alkanes with GAFF, randomizes residue names so the two
    mol2 files cannot collide, packs a 10:20 box, builds AMBER prmtop/inpcrd
    via tleap and verifies OpenMM can construct a System from the result.
    """
    smiles_string0 = "CCCCCC"  # hexane
    smiles_string1 = "CCCCCCCCC"  # nonane
    with utils.enter_temp_directory():  # Prevents creating tons of GAFF files everywhere.
        mol2_filename0 = "./A.mol2"
        frcmod_filename0 = "./A.frcmod"
        mol2_filename1 = "./B.mol2"
        frcmod_filename1 = "./B.frcmod"
        gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
        frcmod_filenames = [frcmod_filename0, frcmod_filename1]
        prmtop_filename = "./box.prmtop"
        inpcrd_filename = "./box.inpcrd"
        openmoltools.openeye.smiles_to_antechamber(smiles_string0, mol2_filename0, frcmod_filename0)
        openmoltools.openeye.smiles_to_antechamber(smiles_string1, mol2_filename1, frcmod_filename1)
        # Give each mol2 a unique random residue name before mixing.
        openmoltools.utils.randomize_mol2_residue_names(gaff_mol2_filenames)
        box_pdb_filename = "./box.pdb"
        gaff_mol2_filenames = [mol2_filename0, mol2_filename1]
        n_monomers = [10, 20]
        packed_trj = packmol.pack_box([md.load(mol2) for mol2 in gaff_mol2_filenames], n_monomers)
        packed_trj.save(box_pdb_filename)
        tleap_cmd = openmoltools.amber.build_mixture_prmtop(gaff_mol2_filenames, frcmod_filenames, box_pdb_filename, prmtop_filename, inpcrd_filename)
        prmtop = app.AmberPrmtopFile(prmtop_filename)
        inpcrd = app.AmberInpcrdFile(inpcrd_filename)
        system = prmtop.createSystem(nonbondedMethod=app.PME, nonbondedCutoff=1.0*u.nanometers, constraints=app.HBonds)
|
24,990 | b7b816198ff53f56f468726395b7064ddd8403ed | from ticket.forms import CommentForm, TicketForm
from main.service import get_homes
from django.shortcuts import render, redirect
from .models import *
from django.urls import reverse
from django.contrib.auth.decorators import login_required
# Human-readable ticket status labels (Russian UI strings: 1/2 = "under
# review", 3 = "done").  Statuses 1 and 2 deliberately share a label; status 2
# additionally implies the ticket was accepted (see subscribe()).
STATUSES = {
    1: "На рассмотрении",
    2: "На рассмотрении",
    3: "Выполнено"
}
@login_required(login_url="/accounts/login/")
def ticket(request):
    """List tickets for the object currently selected in the session."""
    current_object = request.session.get('current_object')
    tickets = Ticket.objects.filter(object_id=current_object).order_by('-id')
    homes = get_homes(request)
    context = {
        "homes": homes['homes'],
        "current": homes['current'],
        "tickets": tickets,
    }
    return render(request, 'ticket/ticket.html', context)
@login_required(login_url="/accounts/login/")
def details(request, pk):
    """Show one ticket with its files, status label and comment thread.

    On POST, validates the comment form, persists the comment, then stores any
    uploaded attachments under media/ticket/comment/<comment id>.
    """
    ticket = Ticket.objects.get(id=pk)
    ticket_files = ticket.ticket_files.all()
    status = STATUSES[ticket.status]
    initials = {"author_id": request.user.id, 'ticket_id': pk}
    form = CommentForm(initial=initials)
    if request.method == 'POST':
        form = CommentForm(request.POST, initial=initials)
        if form.is_valid():
            # BUG FIX: the original did form.save(commit=False) and then used
            # comment.id while it was still None — attachments pointed at no
            # comment and the comment row itself was never saved.  Saving the
            # form gives the comment a primary key before files reference it.
            comment = form.save()
            for afile in request.FILES.getlist('file[]'):
                handle_uploaded_file(afile, "media/ticket/comment/" + str(comment.id))
                file = CommentFile(comment_id=comment.id, file=str(comment.id) + afile.name, size=round(afile.size / 1024, 2))
                file.save()
            ticket.save()
            form = CommentForm(initial=initials)
        else:
            # Re-bind the POST data so validation errors render in the template.
            form = CommentForm(request.POST, initial=initials)
    comments = [(comment, comment.comment_files.all()) for comment in ticket.ticket_comments.all()]
    homes = get_homes(request)
    context = {"homes": homes['homes'], "current": homes['current'], "ticket": ticket,
               "status": status, "ticket_files": ticket_files, "comment_form": form, "comments": comments}
    return render(request, 'ticket/details.html', context)
@login_required(login_url="/accounts/login/")
def add(request):
    """Create a new ticket for the currently selected object.

    On POST, saves the ticket, stores uploaded attachments, records the
    initial status-history entry and redirects back to the ticket list.
    """
    initials = {"author_id": request.user.id, 'object_id': request.session.get('current_object')}
    form = TicketForm(initial=initials)
    if request.method == 'POST':
        form = TicketForm(request.POST, initial=initials)
        if form.is_valid():
            # BUG FIX: the original built TicketFile rows with ticket_id while
            # the ticket was still unsaved (id=None); save first so attachments
            # reference a real primary key.
            ticket = form.save()
            for afile in request.FILES.getlist('file[]'):
                handle_uploaded_file(afile, "media/ticket/")
                file = TicketFile(ticket_id=ticket.id, file=afile.name, size=round(afile.size / 1024, 2))
                file.save()
            TicketStatusHistory(user=request.user, ticket=ticket, status=1).save()
            return redirect(reverse('ticket:ticket'))
        else:
            # Re-bind the POST data so validation errors render in the template.
            form = TicketForm(request.POST, initial=initials)
    homes = get_homes(request)
    context = {"homes": homes['homes'], "current": homes['current'], "form": form}
    return render(request, 'ticket/add.html', context)
def handle_uploaded_file(f, path):
    """Stream an uploaded file's chunks to *path* + the file's original name.

    ``path`` is used as a raw prefix (callers pass either a directory with a
    trailing separator or an id prefix), matching how the views build it.
    """
    # ``with`` guarantees the handle is closed even if a chunk write raises;
    # the original leaked the descriptor on error.
    with open(path + f.name, 'wb+') as destination:
        for chunk in f.chunks():
            destination.write(chunk)
# CONSISTENCY FIX: every other view in this module is guarded by
# @login_required, but these state-changing views were not — an anonymous
# request could accept/finish tickets (and unsubscribe even crashes on
# request.user).  Guard them all the same way.
@login_required(login_url="/accounts/login/")
def subscribe(request, pk):
    """Accept a ticket: set it in progress (status 2) for the current user."""
    ticket = Ticket.objects.get(id=pk)
    ticket.status = 2
    ticket.accepted = True
    ticket.accepted_by = request.user
    ticket.save()
    TicketStatusHistory(user=request.user, ticket=ticket, status=2).save()
    return redirect("/ticket/details/" + str(pk))


@login_required(login_url="/accounts/login/")
def unsubscribe(request, pk):
    """Release a ticket back to the unassigned state (status 1)."""
    ticket = Ticket.objects.get(id=pk)
    ticket.status = 1
    ticket.accepted = False
    ticket.accepted_by = None
    ticket.finished = False
    ticket.finish_accepted = False
    ticket.save()
    TicketStatusHistory(user=request.user, ticket=ticket, status=1).save()
    return redirect("/ticket/details/" + str(pk))


@login_required(login_url="/accounts/login/")
def finish(request, pk):
    """Mark a ticket done (status 3), pending the reporter's confirmation."""
    ticket = Ticket.objects.get(id=pk)
    ticket.status = 3
    ticket.finished = True
    ticket.finish_accepted = False
    ticket.save()
    TicketStatusHistory(user=request.user, ticket=ticket, status=3).save()
    return redirect("/ticket/details/" + str(pk))


@login_required(login_url="/accounts/login/")
def accept_finish(request, pk):
    """Confirm a finished ticket."""
    ticket = Ticket.objects.get(id=pk)
    ticket.finish_accepted = True
    ticket.save()
    TicketStatusHistory(user=request.user, ticket=ticket, status=3).save()
    return redirect("/ticket/details/" + str(pk))


@login_required(login_url="/accounts/login/")
def decline_finish(request, pk):
    """Reject a finish: put the ticket back in progress (status 2)."""
    ticket = Ticket.objects.get(id=pk)
    ticket.status = 2
    ticket.finished = False
    ticket.finish_accepted = False
    ticket.save()
    TicketStatusHistory(user=request.user, ticket=ticket, status=2).save()
    return redirect("/ticket/details/" + str(pk))
|
24,991 | c04569f93eeb79cf7714b2e81b6c0d3ffc512efa | from flask import request, jsonify, make_response
from app import db
from models import User
from serializers import user_schema
## ---------------------- User Registration ---------------------- ##
def USER_REGISTER():
    """Register a new user from the JSON request body.

    Expects {"username": ..., "password": ...}; returns the serialized user on
    success, a 400 for missing input, a duplicate notice for an existing
    username, or a 422 if the database commit fails.
    """
    payload = request.get_json()
    try:
        username = payload['username']
        password = payload['password']
    except KeyError:
        username = password = None
    if username is None or password is None:
        # missing arguments
        return make_response(jsonify({'message': 'No input data provided'}), 400)
    if User.query.filter_by(username=username).first() is not None:
        # existing user
        return jsonify({'message': 'already exists', 'field': 'username', 'value': username})
    new_user = User(username=username)
    new_user.hash_password(password)
    try:
        db.session.add(new_user)
        db.session.commit()
    except Exception as e:
        print(e)
        return make_response(jsonify({'message': 'failed'}), 422)
    return user_schema.dump(new_user)
24,992 | 7b3860a6ef5045957183d2efcdb42640b45ebe58 | # Write a program that lets the user play the game of Rock, Paper, Scissors
# against the computer. The program should work as follows.
#
# 1. When the program begins, a random number in the range of 1 through 3
# is generated. If the number is 1, then the computer has chosen rock. If
# the number is 2, then the computer has chosen paper. If the number is 3,
# then the computer has chosen scissors. (Don’t display the computer’s
# choice yet.)
# 2. The user enters his or her choice of “rock”, “paper”, or “scissors” at
# the keyboard.
# 3. The computer’s choice is displayed.
# 4. A winner is selected according to the following rules:
# • If one player chooses rock and the other player chooses scissors, then
# rock wins. (The rock smashes the scissors.)
# • If one player chooses scissors and the other player chooses paper, then
# scissors wins. (Scissors cuts paper.)
# • If one player chooses paper and the other player chooses rock, then
# paper wins. (Paper wraps rock.)
# • If both players make the same choice, the game must be played again to
# determine the winner.
|
24,993 | 886fcd03ed3560c8d89821bec56d512d261df5d7 | import os
from functools import partial
import aiohttp
from aiohttp import web
from utils import get_logger
from webapp import (
setup_routes,
setup_session,
setup_templates,
setup_static_routes,
setup_cache,
destroy_cache,
init_pg,
close_pg,
)
from webapp.helpers import setup_flash
def run():
    """Configure and start the aiohttp web application."""
    app = web.Application()
    app['logger'] = get_logger('webapp')

    # Startup / shutdown hooks for the cache and the Postgres pool.
    for hook in (setup_cache, init_pg):
        app.on_startup.append(hook)
    for hook in (destroy_cache, close_pg):
        app.on_shutdown.append(hook)

    # Wire up sessions, flash messages, routes, static files and templates.
    for configure in (setup_session, setup_flash, setup_routes,
                      setup_static_routes, setup_templates):
        configure(app)

    unbuffered_print = partial(print, flush=True)
    port = int(os.environ.get('PORT', 8080))
    unbuffered_print('Running aiohttp {}'.format(aiohttp.__version__))
    web.run_app(app, print=unbuffered_print, port=port)
|
24,994 | 3724cc6531aaf92fea58d42d68cf50165d246d7b | #!/usr/bin/env python
import os
import unittest
import pandas as pd
from cardano_explorer import blockfrost_api
from cardano_explorer.blockfrost import util
from cardano_explorer import cnft_io
# Check if the Blockfrost API Key is configured in a environmental variable
# assert (os.getenv('BLOCKFROST_API_KEY') is not None), '[ERROR] Your blockfrost api key is not configured in your environement path.'
policy_id = '40fa2aa67258b4ce7b5782f74831d46a84c59a0ff0c28262fab21728'
# SECURITY FIX: a live Blockfrost API key was hard-coded here.  Read it from
# the environment instead (matching the commented-out BLOCKFROST_API_KEY
# check above) so the secret is never committed; the leaked key must be
# revoked separately.
cardano_mainnet = blockfrost_api.Auth(os.getenv('BLOCKFROST_API_KEY'))
class TEST_CNFT_IO(unittest.TestCase):
    """Exercises the cnft_io registry helpers (verified policies / projects)."""

    def test_policy(self):
        # Registry lookups return list/dict payloads for a known project and
        # raise ValueError when the policy id does not match the project name.
        self.assertTrue(isinstance(cnft_io.verified_policies(), list))
        self.assertTrue(isinstance(cnft_io.get_policy_id('Clay Nation by Clay Mates'), list))
        self.assertTrue(cnft_io.check_policy_id(policy_id, 'Clay Nation by Clay Mates'), list)
        self.assertRaises(ValueError, cnft_io.check_policy_id, policy_id, 'hgvfv')
        self.assertTrue(isinstance(cnft_io.get_project_info('Clay Nation by Clay Mates'), dict))
        self.assertTrue(cnft_io.project_exist('Clay Nation by Clay Mates'), dict)
        self.assertFalse(cnft_io.project_exist('hgvgy'), dict)
if __name__ == '__main__':
unittest.main()
|
24,995 | 9b018b7632a6d482064dbb69c3f8bd7698574346 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This script writes specfem sources into the respective simulation directories.
:copyright:
Lucas Sawade (lsawade@princeton.edu)
:license:
GNU Lesser General Public License, version 3 (LGPLv3)
(http://www.gnu.org/licenses/lgpl-3.0.en.html)
"""
from gcmt3d.source import CMTSource
from gcmt3d.data import SpecfemSources
from gcmt3d.asdf.utils import smart_read_yaml, is_mpi_env
import os
# Get logger to log progress
from gcmt3d import logger
def write_sources(cmt_filename, param_path):
    """Write perturbed SPECFEM source files for one CMT solution.

    Reads the database and inversion parameter YAML files under *param_path*,
    builds a SpecfemSources generator for the CMT solution in *cmt_filename*
    and writes the perturbed sources into <cmt_dir>/CMT_SIMs.
    """
    # Define parameter directory
    databaseparam_path = os.path.join(param_path,
                                      "Database/DatabaseParameters.yml")
    inversionparam_path = os.path.join(param_path,
                                       "CMTInversion/InversionParams.yml")
    # Load Parameters
    DB_params = smart_read_yaml(databaseparam_path,
                                mpi_mode=is_mpi_env())
    # Inversion Params
    INV_params = smart_read_yaml(inversionparam_path, mpi_mode=is_mpi_env())
    # File and directory
    cmt_dir = os.path.dirname(cmt_filename)
    cmt = CMTSource.from_CMTSOLUTION_file(cmt_filename)
    outdir = os.path.join(cmt_dir, "CMT_SIMs")
    # Perturbation step sizes for moment, depth and location.
    dm = float(INV_params["config"]["dmoment"])  # 10**22 dyne*cm
    dz = float(INV_params["config"]["ddepth"])  # 1000 m
    ddeg = float(INV_params["config"]["dlocation"])  # 0.001 deg
    logger.info(" ")
    logger.info(" Perturbation parameters")
    logger.info(" " + 50 * "*")
    logger.info(" 𝚫M: %g" % dm)
    logger.info(" 𝚫z: %g" % dz)
    logger.info(" 𝚫deg: %g" % ddeg)
    logger.info(" " + 50 * "*" + "\n")
    # Create source creation class
    sfsource = SpecfemSources(cmt, cmt_dir, npar=DB_params['npar'],
                              dm=dm, dx=dz, ddeg=ddeg,
                              verbose=DB_params['verbose'], outdir=outdir)
    # Write sources
    sfsource.write_sources()
|
24,996 | 45732f4610ae33b3835532ff139e09e4522f2dbe | import random
def menu():
    """Prompt for a "<cmd> <player_x> <player_o>" command and set up the game.

    Valid player levels are "user", "easy", "medium" and "hard".  Prints the
    empty board and returns (player_x, player_o, players) for a valid command;
    "exit" terminates the program.
    """
    global players
    global player_x
    global player_o
    while True:
        print("Input command:")
        command = input()
        command_list = command.split()
        if command == "exit":
            # BUG FIX: the original `break` fell through to a trailing
            # `return menu()`, which restarted the menu instead of quitting.
            raise SystemExit
        elif len(command_list) != 3:
            print("Bad parameters!")
        elif command_list[1] in competitors and command_list[2] in competitors:
            players = {"player_X": {"Level": command_list[1], "Mark": "X", "Id": 1},
                       "player_O": {"Level": command_list[2], "Mark": "O", "Id": 1}}
            player_x = command_list[1]
            player_o = command_list[2]
            print(f"""---------
| {field[0]} {field[1]} {field[2]} |
| {field[3]} {field[4]} {field[5]} |
| {field[6]} {field[7]} {field[8]} |
---------""")
            return player_x, player_o, players
        else:
            print("Bad parameters!")
def human(mark, opponent_mark, id):
    """Read a move from the keyboard and place *mark* on the global board.

    Re-prompts until the input is two digits in 1..3; if the chosen cell is
    occupied the whole prompt restarts (via recursion).  Prints the updated
    board and returns the board list.
    """
    global field_list
    move_x = 0
    move_y = 0
    while True:
        print("Enter the coordinates:")
        coordinates = str(input())
        # Substitute the separating space so isdigit() validates "d d" input.
        if not coordinates.replace(" ", "1").isdigit():
            print("You should enter numbers!")
        elif any(int(x) > 3 or int(x) < 1 for x in coordinates.split()):
            print("Coordinates should be from 1 to 3!")
        else:
            break
    # First digit selects the column offset (0..2).
    if coordinates[0] == "1":
        move_x = 0
    elif coordinates[0] == "2":
        move_x = 1
    elif coordinates[0] == "3":
        move_x = 2
    # Second digit selects the row: indexing starts from cell 6, so row 1
    # maps to offset 0 (cells 6..8) and row 3 to offset -6 (cells 0..2).
    if coordinates[2] == "1":
        move_y = 0
    elif coordinates[2] == "2":
        move_y = -3
    elif coordinates[2] == "3":
        move_y = -6
    while True:
        if field_list[6 + move_x + move_y] != "_":
            print("This cell is occupied! Choose another one!")
            # Restart the full prompt for a fresh coordinate pair.
            return human(mark, opponent_mark, id)
        else:
            field_list[6 + move_x + move_y] = mark
            break
    result = "".join(field_list).replace("_", " ")
    print(f"""---------
| {result[0]} {result[1]} {result[2]} |
| {result[3]} {result[4]} {result[5]} |
| {result[6]} {result[7]} {result[8]} |
---------""")
    return field_list
def ai_easy(mark, opponent_mark, id):
    """Easy AI: claim a uniformly random free cell, print the board.

    IMPROVEMENT: the original rolled random coordinates and recursed until it
    hit a free cell — unbounded recursion on a nearly-full board.  Choosing
    directly from the free cells is still uniform and always terminates.
    """
    global field_list
    free_cells = [i for i, cell in enumerate(field_list) if cell == "_"]
    field_list[random.choice(free_cells)] = mark
    result = "".join(field_list).replace("_", " ")
    print('Making move level "easy"')
    print(f"""---------
| {result[0]} {result[1]} {result[2]} |
| {result[3]} {result[4]} {result[5]} |
| {result[6]} {result[7]} {result[8]} |
---------""")
    return field_list
def ai_medium(mark, opponent_mark, id):
    """Medium AI: win or block if possible, otherwise play a random cell.

    Scans the global ``combinations`` snapshot (rebuilt by check_result after
    the previous move) for a line with two of our marks (complete it) or two
    opponent marks (block it).  ``combination_index`` maps position j of
    combination i back to flat board index ``combination_index[i * 3 + j]``.
    """
    move_x = 0
    move_y = 0
    i = 0
    random_method = True
    global field_list
    for x in combinations:
        if x.count(mark) == 2 and x.count("_") == 1:
            # Complete our own two-in-a-row.
            field_list[combination_index[i * 3 + x.index("_")]] = mark
            random_method = False
            break
        elif x.count(opponent_mark) == 2 and x.count("_") == 1:
            # Block the opponent's two-in-a-row.
            field_list[combination_index[i * 3 + x.index("_")]] = mark
            random_method = False
            break
        i += 1
    if random_method:
        # Fall back to the same random-coordinate scheme as ai_easy:
        # retry (via recursion) until a free cell is hit.
        coordinates = f'{random.choice(["1", "2", "3"])} {random.choice(["1", "2", "3"])}'
        if coordinates[0] == "1":
            move_x = 0
        elif coordinates[0] == "2":
            move_x = 1
        elif coordinates[0] == "3":
            move_x = 2
        if coordinates[2] == "1":
            move_y = 0
        elif coordinates[2] == "2":
            move_y = -3
        elif coordinates[2] == "3":
            move_y = -6
        while True:
            if field_list[6 + move_x + move_y] != "_":
                return ai_medium(mark, opponent_mark, id)
            else:
                field_list[6 + move_x + move_y] = mark
                break
    result = "".join(field_list).replace("_", " ")
    print('Making move level "medium"')
    print(f"""---------
| {result[0]} {result[1]} {result[2]} |
| {result[3]} {result[4]} {result[5]} |
| {result[6]} {result[7]} {result[8]} |
---------""")
    return field_list
def check_result(field_list, mark):
    """Evaluate the board for *mark*: "Win", "Draw" or None (game continues).

    Side effects: refreshes the global ``combinations`` snapshot (consumed by
    ai_medium) and stores the verdict in the global ``status``.
    """
    global combinations
    global status
    # A full board is a draw unless a winning line overrides it below.
    status = "Draw" if field_list.count("_") == 0 else None
    lines = [(0, 1, 2), (3, 4, 5), (6, 7, 8),
             (0, 3, 6), (1, 4, 7), (2, 5, 8),
             (6, 4, 2), (0, 4, 8)]
    combinations = [[field_list[a], field_list[b], field_list[c]]
                    for a, b, c in lines]
    if any(line.count(mark) == 3 for line in combinations):
        status = "Win"
    return status
def empty_fields(field_list):
    """Return the indices of all unoccupied ("_") cells, in board order."""
    return [index for index, cell in enumerate(field_list) if cell == "_"]
def minimax(estimated_field, mark, opponent_mark, id):
    """Exhaustive game-tree search; returns (best_move, best_score).

    ``id`` is +1 when maximizing for the calling AI and -1 when minimizing
    for the opponent.  Terminal positions score +/-10 for a win and 0 for a
    draw; in those cases the first returned element is the status string,
    which callers ignore.  Relies on check_result() setting the global
    ``status`` for the player who just moved (``opponent_mark``).
    """
    global fc
    moves = {}
    fc += 1  # diagnostic counter of evaluated nodes
    best_score = -1000 * id
    best_move = None
    empty_field_list = empty_fields(estimated_field)
    # The previous move was made by opponent_mark; see if it ended the game.
    check_result(estimated_field, opponent_mark)
    if status == "Win":
        score = 10 * -id
        return status, score
    elif status == "Draw":
        score = 0
        return status, score
    else:
        for x in empty_field_list:
            # Try the move, recurse with roles swapped, then undo it.
            estimated_field[x] = mark
            if mark == "X":
                move, score = minimax(estimated_field, "O", "X", -id)
            else:
                move, score = minimax(estimated_field, "X", "O", -id)
            estimated_field[x] = "_"
            moves[x] = score
        if id > 0:
            # Maximizing: keep the move with the highest score.
            for key, value in moves.items():
                if int(value) > best_score:
                    best_score = int(value)
                    best_move = key
        else:
            # Minimizing: keep the move with the lowest score.
            for key, value in moves.items():
                if int(value) < best_score:
                    best_score = value
                    best_move = key
        return best_move, best_score
def ai_hard(mark, opponent_mark, id):
    """Hard AI: play the optimal move found by exhaustive minimax search.

    CLEANUP: removed unused locals (scores, move_x, move_y, moves) from the
    original; behavior is unchanged.
    """
    global field_list
    # Search a scratch copy so minimax can try moves without touching the
    # real board.
    estimated_field = field_list.copy()
    best_move, best_score = minimax(estimated_field, mark, opponent_mark, 1)
    field_list[best_move] = mark
    result = "".join(field_list).replace("_", " ")
    print('Making move level "hard"')
    print(f"""---------
| {result[0]} {result[1]} {result[2]} |
| {result[3]} {result[4]} {result[5]} |
| {result[6]} {result[7]} {result[8]} |
---------""")
    return field_list
def player_x_choice(players):
    """Resolve player X's configured level to its move function."""
    dispatch = {"user": human, "easy": ai_easy, "medium": ai_medium, "hard": ai_hard}
    return dispatch.get(players["player_X"]["Level"])
def player_o_choice(players):
    """Resolve player O's configured level to its move function."""
    dispatch = {"user": human, "easy": ai_easy, "medium": ai_medium, "hard": ai_hard}
    return dispatch.get(players["player_O"]["Level"])
# ---- module state and game entry point ----
status = None
players = None
player_x = None
player_o = None
# NOTE(review): menu() indexes field[0]..field[8], so this literal must hold
# at least 9 characters (likely 9 spaces mangled in transit) — confirm.
field = str(" ")
field_list = ["_", "_", "_", "_", "_", "_", "_", "_", "_"]
# Value snapshots of all eight winning lines (rows, columns, diagonals);
# check_result() rebuilds this global from the current board on every call.
combinations = [[field_list[0], field_list[1], field_list[2]],
                [field_list[3], field_list[4], field_list[5]],
                [field_list[6], field_list[7], field_list[8]],
                [field_list[0], field_list[3], field_list[6]],
                [field_list[1], field_list[4], field_list[7]],
                [field_list[2], field_list[5], field_list[8]],
                [field_list[6], field_list[4], field_list[2]],
                [field_list[0], field_list[4], field_list[8]]]
# Flat board indices of the same eight lines, consumed by ai_medium().
combination_index = [0, 1, 2, 3, 4, 5, 6, 7, 8, 0, 3, 6, 1, 4, 7, 2, 5, 8, 6, 4, 2, 0, 4, 8]
competitors = ["user", "easy", "medium", "hard"]
fc = 0  # minimax node counter (diagnostic only)
menu()
# Main game loop: X and O alternate until a win or a draw is detected.
while True:
    player_x_choice(players)("X", "O", 1)
    check_result(field_list, "X")
    if status == "Win":
        print("X wins")
        break
    elif status == "Draw":
        print("Draw")
        break
    player_o_choice(players)("O", "X", -1)
    check_result(field_list, "O")
    if status == "Win":
        print("O wins")
        break
    elif status == "Draw":
        print("Draw")
        break
|
24,997 | a042dc975fa5eae49b24386c93042a0e83e5c986 | # Generated by Django 2.0.7 on 2018-07-16 17:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add the optional ``fixedValue`` float field to the ``chore`` model."""

    dependencies = [
        ('allocate', '0002_auto_20180716_1205'),
    ]

    operations = [
        migrations.AddField(
            model_name='chore',
            name='fixedValue',
            field=models.FloatField(blank=True, null=True),
        ),
    ]
|
24,998 | b1c963d40be598ac84b7d15df003677b77c4dd77 | import sys,socket, time, threading
class Page:
    """One display page: a fixed number of text lines rendered via a display.

    ``display`` must expose ``rows`` (line count) and ``writeline(row, text)``
    returning the wire data for that row.
    """

    def __init__(self, display):
        # Optional per-page refresh interval in seconds; None means the page
        # is only refreshed when switched to.
        self.freq = None
        self.display = display
        # One (initially empty) text entry per display row.
        self.lines = ["" for _ in range(display.rows)]

    def setline(self, line, text):
        """Replace the text shown on row *line*."""
        self.lines[line] = text

    def write_page(self):
        """Render every row and return the concatenated display data."""
        rendered = [self.display.writeline(row, text)
                    for row, text in enumerate(self.lines)]
        return "".join(rendered)

    def update(self):
        """Hook for subclasses that refresh their content; no-op here."""
        pass
class Pages:
    """Ordered collection of Page objects with a rotating current index."""

    def __init__(self):
        # -1 so that the very first next_page() lands on page 0.
        self.cur_page = -1
        self.pages = []

    def add_page(self, page):
        """Append *page* to the rotation."""
        self.pages.append(page)

    def current_page(self):
        """Return the page the rotation currently points at."""
        return self.pages[self.cur_page]

    def write_current_page(self):
        """Render the current page's display data."""
        return self.current_page().write_page()

    def next_page(self):
        """Advance to the next page (wrapping around) and render it."""
        self.cur_page += 1
        if self.cur_page >= len(self.pages):
            self.cur_page = 0
        return self.write_current_page()
class PageScheduler():
    """Rotates through a Pages collection, pushing renders to a client socket.

    ``client`` must expose send(data).  NOTE(review): run() uses Python 2
    print-statement syntax, so this module targets Python 2 — confirm before
    porting.
    """

    def __init__(self, client, pages, switch_time = 5):
        self.client = client
        self.pages = pages
        # Seconds each page stays on screen before rotating.
        self.switch_time = switch_time

    def run(self):
        """Loop forever, switching pages; errors are printed and swallowed."""
        while 1:
            try:
                self.switch_page()
            except Exception as e:
                print "Exception: ", e

    def switch_page(self):
        """Show the next page for switch_time seconds.

        While the page is up it is re-rendered and re-sent every ``freq``
        seconds (or once per switch_time if the page has no freq).
        """
        self.pages.next_page()
        t = time.time()
        freq = self.pages.current_page().freq;
        while time.time() < t + self.switch_time:
            self.update_page()
            self.client.send(self.pages.write_current_page())
            if freq is None:
                time.sleep(self.switch_time)
            else:
                time.sleep(freq)

    def update_page(self):
        """Give the current page a chance to refresh its content."""
        self.pages.current_page().update()
|
24,999 | ddf5bf4367427424f8d934b9333bd0c760f4de70 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import sys
import time
import json
import signal
import random
import logging
import traceback
import threading
from subprocess import Popen, PIPE
sys.path.append('/root/pyBus/lib/')
# Imports for the project
import pyBus_module_display as pB_display
import pyBus_module_audio as pB_audio
import pyBus_util as pB_util
import pyBus_cdc as pB_cdc
############################################################################
# CONFIG
############################################################################
versionPB = 'v1.1'
WRITER = None
menuLevel = None
threadDisplay = None
display = None
ERROR = None
updateIndex = True
TrackInfo = None
TICK = 0.2 # sleep interval in seconds used after displaying a string (default 1)
############################################################################
# FUNCTIONS
############################################################################
def init(writer):
    """Initialise the display writer and audio, then start the display thread."""
    global WRITER
    global display
    # BUG FIX: threadDisplay was assigned without a ``global`` declaration, so
    # only a local was set and end() could never see (or stop) the thread.
    global threadDisplay
    WRITER = writer
    display = pB_display.busWriter(WRITER)
    pB_audio.init()
    threadDisplay = displayIO()
    threadDisplay.start()
def end():
    """Stop the display thread (if one was started) and shut down audio."""
    # NOTE(review): init() assigns threadDisplay locally (no ``global``), so
    # this check sees the module-level None and the thread is never stopped —
    # see the fix in init().
    if threadDisplay:
        threadDisplay.stop()
    pB_audio.end()
    logging.debug("Quitting Audio CLIENT")
def error(errorID):
    """Record *errorID* in the module-level ERROR flag shown by the display loop."""
    global ERROR
    ERROR = errorID
############################################################################
# Scroll text
############################################################################
class TextScroller:
    """Cyclically scrolls a text string one character per scroll() call."""

    text = ''
    position = 0
    textLength = 0

    def __init__(self, initialText):
        self.text = initialText

    def scroll(self):
        """Return the current scroll window and advance one character."""
        # Duplicate the text (with a single separating space) so the window
        # can wrap past the end of the original string.
        looped = self.text + ' ' + self.text
        window = looped[self.position:]
        self.position += 1
        # Restart after one full pass over text-plus-separator.
        if self.position > len(self.text) + 1:
            self.position = 0
        return window

    def setNewText(self, newText):
        """Replace the text and restart scrolling from the beginning."""
        self.text = newText
        self.position = 0
############################################################################
# DISPLAY CLASS
############################################################################
class displayIO(threading.Thread):
    """Background thread that continuously redraws the head-unit display.

    Renders the screen matching the current ``menuLevel``: seven title slots
    (writeTitleT0..T6) every pass, plus the ten index slots
    (writeIndexF0..F9) once whenever ``updateIndex`` is set.  Each write is
    followed by a TICK sleep to pace the bus.
    """

    def __init__(self):
        display.writeTitleT0('Initialized')
        global menuLevel
        # Start on the home screen.
        menuLevel = 'homeMain'
        threading.Thread.__init__(self)

    def run(self):
        """Redraw loop; runs until the thread is stopped externally."""
        global updateIndex
        global ERROR
        global versionPB
        global menuLevel
        logging.info('Display thread initialized')
        while True:
            if menuLevel == 'homeMain':  # HOME
                display.writeTitleT0('BMW-MUSIC')
                time.sleep(TICK)
                display.writeTitleT1(' ')
                time.sleep(TICK)
                display.writeTitleT3(versionPB)
                time.sleep(TICK)
                display.writeTitleT4(' ')
                time.sleep(TICK)
                display.writeTitleT6('Not chosen')
                time.sleep(TICK)
                # Status slot shows OK or the latest error set via error().
                if ERROR is None:
                    display.writeTitleT5('Status:')
                    time.sleep(TICK)
                    display.writeTitleT2('OK')
                    time.sleep(TICK)
                else:
                    display.writeTitleT5('Error:')
                    time.sleep(TICK)
                    display.writeTitleT2(ERROR)
                    time.sleep(TICK)
                # Index list is only rewritten when flagged dirty.
                if updateIndex is True:
                    display.refreshIndex()
                    time.sleep(TICK)
                    display.writeIndexF0('Volumio')
                    time.sleep(TICK)
                    display.writeIndexF1('Bluetooth')
                    time.sleep(TICK)
                    display.writeIndexF2('AirPlay')
                    time.sleep(TICK)
                    display.writeIndexF3('')
                    time.sleep(TICK)
                    display.writeIndexF4('')
                    time.sleep(TICK)
                    display.writeIndexF5('Reboot')
                    time.sleep(TICK)
                    display.writeIndexF6('Shutdown')
                    time.sleep(TICK)
                    display.writeIndexF7('')
                    time.sleep(TICK)
                    display.writeIndexF8('')
                    time.sleep(TICK)
                    display.writeIndexF9('')
                    time.sleep(TICK)
                    updateIndex = False
            elif menuLevel == 'btMain':  # Bluetooth Main
                # Title shows "artist - title" from the audio backend.
                display.writeTitleT0(
                    '%s - %s' % (pB_audio.getTrackInfo().get('artist'), pB_audio.getTrackInfo().get('title')))
                time.sleep(TICK)
                display.writeTitleT1(' ')
                time.sleep(TICK)
                display.writeTitleT3(' ')
                time.sleep(TICK)
                display.writeTitleT4(' ')
                time.sleep(TICK)
                display.writeTitleT6('Bluetooth')
                time.sleep(TICK)
                if ERROR is None:
                    display.writeTitleT5('%s' % pB_audio.getTrackInfo().get('status'))
                    time.sleep(TICK)
                    display.writeTitleT2(' ')
                    time.sleep(TICK)
                else:
                    display.writeTitleT5('Error:')
                    time.sleep(TICK)
                    display.writeTitleT2(ERROR)
                    time.sleep(TICK)
                if updateIndex is True:
                    display.refreshIndex()
                    time.sleep(TICK)
                    display.writeIndexF0('Select device')
                    time.sleep(TICK)
                    display.writeIndexF1('Add a new device')
                    time.sleep(TICK)
                    display.writeIndexF2('')
                    time.sleep(TICK)
                    display.writeIndexF3('')
                    time.sleep(TICK)
                    display.writeIndexF4('')
                    time.sleep(TICK)
                    display.writeIndexF5('')
                    time.sleep(TICK)
                    display.writeIndexF6('')
                    time.sleep(TICK)
                    display.writeIndexF7('')
                    time.sleep(TICK)
                    display.writeIndexF8(' ')
                    time.sleep(TICK)
                    display.writeIndexF9('')
                    time.sleep(TICK)
                    updateIndex = False
            # Remaining menu levels are not implemented yet.
            elif menuLevel == 'btSelectDevice':  # Bluetooth -> Select device
                pass
            elif menuLevel == 'btSelectedDevice':  # Bluetooth -> Select device -> selected device
                pass
            elif menuLevel == 'btNewDevice':  # Bluetooth -> Add a new device
                pass
            elif menuLevel == 'vlmMain':  # Volumio Main
                pass
            elif menuLevel == 'apMain':  # AirPlay Main
                pass

    def stop(self):
        """Terminate the thread (Python 2-only private Thread API)."""
        logging.info('Display shutdown')
        self._Thread__stop()
############################################################################
# BUTTON CLASS
############################################################################
class buttonIO(object):
    """Callback table for head-unit controls; all handlers are stubs.

    Naming convention (from the method names): trailing P = pressed,
    H = held, R = released.  Groups: info button, numbered buttons 1-6,
    left/right/generic arrows, mode button, index-row selections F0-F9, and
    steering-wheel controls (RT, voice, up/down arrows).
    """

    # -- info button --
    def infoP(self):
        pass
    def infoH(self):
        pass
    def infoR(self):
        pass
    # -- numbered buttons 1..6 --
    def button1P(self):
        pass
    def button1H(self):
        pass
    def button1R(self):
        pass
    def button2P(self):
        pass
    def button2H(self):
        pass
    def button2R(self):
        pass
    def button3P(self):
        pass
    def button3H(self):
        pass
    def button3R(self):
        pass
    def button4P(self):
        pass
    def button4H(self):
        pass
    def button4R(self):
        pass
    def button5P(self):
        pass
    def button5H(self):
        pass
    def button5R(self):
        pass
    def button6P(self):
        pass
    def button6H(self):
        pass
    def button6R(self):
        pass
    # -- arrow buttons (left / right / generic) --
    def ArrowLP(self):
        pass
    def ArrowLH(self):
        pass
    def ArrowLR(self):
        pass
    def ArrowRP(self):
        pass
    def ArrowRH(self):
        pass
    def ArrowRR(self):
        pass
    def ArrowP(self):
        pass
    def ArrowH(self):
        pass
    def ArrowR(self):
        pass
    # -- mode button --
    def modeP(self):
        pass
    def modeH(self):
        pass
    def modeR(self):
        pass
    # -- index-row selections F0..F9 --
    def slctIndexF0(self):
        pass
    def slctIndexF1(self):
        pass
    def slctIndexF2(self):
        pass
    def slctIndexF3(self):
        pass
    def slctIndexF4(self):
        pass
    def slctIndexF5(self):
        pass
    def slctIndexF6(self):
        pass
    def slctIndexF7(self):
        pass
    def slctIndexF8(self):
        pass
    def slctIndexF9(self):
        pass
    # -- steering-wheel controls --
    def wheelRT(self):
        pass
    def wheelVoiceP(self):
        pass
    def wheelVoiceH(self):
        pass
    def wheelVoiceR(self):
        pass
    def wheelArrowUP(self):
        pass
    def wheelArrowUH(self):
        pass
    def wheelArrowUR(self):
        pass
    def wheelArrowDP(self):
        pass
    def wheelArrowDH(self):
        pass
    def wheelArrowDR(self):
        pass
############################################################################
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.