index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
11,900 | 65725b7b61ba7a665624c73521b2ff76b3481e28 | from __future__ import division
from tthAnalysis.bdtHyperparameterOptimization import xgb_tools as xt
import os
import shutil
import urllib
import gzip
from tthAnalysis.bdtHyperparameterOptimization import mnist_filereader as mf
# Download and unpack the four MNIST sample files into a temporary
# resources directory, then build the train/test data dictionary used by
# the tests below.  (Python 2 module: urllib.urlretrieve.)
dir_path = os.path.dirname(os.path.realpath(__file__))
resources_dir = os.path.join(dir_path, 'resources')
tmp_folder = os.path.join(resources_dir, 'tmp')
if not os.path.exists(tmp_folder):
    os.makedirs(tmp_folder)
main_url = 'http://yann.lecun.com/exdb/mnist/'
train_images = 'train-images-idx3-ubyte'
train_labels = 'train-labels-idx1-ubyte'
test_images = 't10k-images-idx3-ubyte'
test_labels = 't10k-labels-idx1-ubyte'
file_list = [train_labels, train_images, test_labels, test_images]
sample_dir = os.path.join(tmp_folder, 'samples_mnist')
nthread = 2
if not os.path.exists(sample_dir):
    os.makedirs(sample_dir)
for file in file_list:
    file_loc = os.path.join(sample_dir, file)
    # BUG FIX: os.path.join is for filesystem paths and would produce
    # backslashes on Windows; URL components always join with '/'.
    file_url = main_url + file + '.gz'
    urllib.urlretrieve(file_url, file_loc + '.gz')
    # Decompress the downloaded .gz next to it under the plain name.
    with gzip.open(file_loc + '.gz', 'rb') as f_in:
        with open(file_loc, 'wb') as f_out:
            shutil.copyfileobj(f_in, f_out)
data_dict = mf.create_datasets(sample_dir, 16)
def test_initialize_values():
    """initialize_values should respect the ranges and the integer flag."""
    value_dicts = [
        {
            'p_name': 'test1',
            'range_start': 0,
            'range_end': 10,
            'true_int': 'True'
        },
        {
            'p_name': 'test2',
            'range_start': 0,
            'range_end': 10,
            'true_int': 'False'
        },
    ]
    result = xt.initialize_values(value_dicts)
    assert 0 <= result['test2'] <= 10
    assert isinstance(result['test1'], int)
def test_prepare_run_params():
    """prepare_run_params should draw sample_size parameter sets and make
    'test1' an integer in every one of them."""
    value_dict1 = {
        'p_name': 'test1',
        'range_start': 0,
        'range_end': 10,
        'true_int': 'True'
    }
    value_dict2 = {
        'p_name': 'test2',
        'range_start': 0,
        'range_end': 10,
        'true_int': 'False'
    }
    value_dicts = [value_dict1, value_dict2]
    sample_size = 3
    result = xt.prepare_run_params(
        value_dicts,
        sample_size
    )
    # The original accumulated into a variable named `sum`, shadowing the
    # builtin, and carried an unused local nthread=28; both are removed.
    int_count = sum(
        1 for params in result if isinstance(params['test1'], int))
    assert len(result) == sample_size
    assert int_count == sample_size
def test_parameter_evaluation():
    """parameter_evaluation should produce a non-None result for a valid
    xgboost parameter set on the MNIST data."""
    parameter_dict = {
        'num_boost_round': 71,
        'learning_rate': 0.07,
        'max_depth': 2,
        'gamma': 1.9,
        'min_child_weight': 18,
        'subsample': 0.9,
        'colsample_bytree': 0.8
    }
    nthread = 28
    num_class = 10
    results = xt.parameter_evaluation(
        parameter_dict, data_dict, nthread, num_class)
    # PEP 8: identity comparison with None, not `!=`.
    assert results is not None
def test_ensemble_fitnesses():
    """ensemble_fitnesses should evaluate several parameter sets at once."""
    parameter_dicts = [
        {
            'num_boost_round': 71,
            'learning_rate': 0.07,
            'max_depth': 2,
            'gamma': 1.9,
            'min_child_weight': 18,
            'subsample': 0.9,
            'colsample_bytree': 0.8
        },
        {
            'num_boost_round': 72,
            'learning_rate': 0.17,
            'max_depth': 3,
            'gamma': 1.9,
            'min_child_weight': 18,
            'subsample': 0.9,
            'colsample_bytree': 0.8
        }
    ]
    global_settings = {'num_classes': 10, 'nthread': 28}
    # Fixed the stray space in `xt. ensemble_fitnesses`.
    results = xt.ensemble_fitnesses(
        parameter_dicts, data_dict, global_settings)
    # PEP 8: identity comparison with None, not `!=`.
    assert results is not None
def test_dummy_delete_files():
    """Clean up the temporary MNIST download area created at import time."""
    if not os.path.exists(tmp_folder):
        return
    shutil.rmtree(tmp_folder)
|
11,901 | ad85bae4eb45425a74c2c8191c37232746a70dd0 | # uncompyle6 version 3.3.5
# Python bytecode 2.7 (62211)
# Decompiled from: Python 3.7.3 (default, Apr 24 2019, 15:29:51) [MSC v.1915 64 bit (AMD64)]
# Embedded file name: c:\Jenkins\live\output\win_64_static\Release\python-bundle\MIDI Remote Scripts\_Framework\Defaults.py
# Compiled at: 2018-11-30 15:48:11
from __future__ import absolute_import, print_function, unicode_literals
# Timer tick period in seconds used by the framework's task scheduler.
TIMER_DELAY = 0.1
# How long (seconds) a control press counts as "momentary".
MOMENTARY_DELAY = 0.3
# NOTE(review): 0.3 / 0.1 is 2.999... in binary floating point, so int()
# truncates this to 2 ticks rather than the 3 the expression suggests.
MOMENTARY_DELAY_TICKS = int(MOMENTARY_DELAY / TIMER_DELAY)
11,902 | 98f5a98b84b86f91b5de475f1bf8efead37871f8 | import pandas as pd
import numpy as np
# Toggle between the test and train one-hot feature files (False -> train).
dataset = False
if dataset:
    filename = 'all_features_test_OneHot.csv'
else:
    filename = 'all_features_train_OneHot.csv'
df = pd.read_csv(filename)
df.head()  # no-op outside a notebook; kept from the notebook origin
# Drop the row-index column exported into the CSV.
df = df.drop('Unnamed: 0', axis=1)
df.shape  # no-op outside a notebook
from sklearn.model_selection import train_test_split
from sklearn.feature_selection import mutual_info_classif
from sklearn.feature_selection import SelectKBest
# Features: everything except the label, repeat-trip count and id columns.
X = df.drop(['label', 'repeattrips', 'id'], axis=1)
# Target: the 'label' column.
y = df['label']
print(type(X))
print(type(y))
# NOTE(review): test_size=0.80 keeps only 20% of the rows for training --
# confirm this split ratio is intentional.
X_train, X_test, y_train, y_test = train_test_split(X, y,
                                                    test_size=0.80, random_state=1)
# #SelectKBest(Mutual Information)
# #測量兩個變數的相依性
# # 選擇要保留的特徵數
# select_k = 50
# selection = SelectKBest(mutual_info_classif, k=select_k).fit(X_train, y_train)
# #selection.shape
# # 顯示保留的欄位
# features = X_train.columns[selection.get_support()]
# #features =selection.get_feature_names_out(input_features=None)
# print(features)
#ANOVA Univariate Test
#單變項檢定(univariate test),衡量兩個變數的相依性。這個方法適用於連續型變數和二元標的(binary targets)。
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import SelectKBest
# Number of features to keep.
select_k = 50
# ANOVA F-test (univariate) feature selection on the training split.
selection = SelectKBest(f_classif, k=select_k).fit(X_train, y_train)
# Names of the retained columns.
features = X_train.columns[selection.get_support()]
print(features)
# Generate CSV file with the selected feature names.
filename = features
Result = 'ANOVA_values_train.csv'
def OutputCSV(data=None, result=None):
    """Write *data* out as a CSV file named *result*.

    Backward compatible: when called with no arguments (as the script
    does), it falls back to the module-level ``filename`` and ``Result``
    globals, exactly like the original.
    """
    if data is None:
        data = filename
    if result is None:
        result = Result
    df_SAMPLE = pd.DataFrame.from_dict(data)
    df_SAMPLE.to_csv(result, index=True)
    print('成功產出' + result)
OutputCSV()
#Univariate ROC-AUC /RMSE
#使用機器學習模型來衡量兩個變數的相依性,適用於各種變數,且沒對變數的分布做任何假設。
#回歸性問題使用RMSE,分類性問題使用ROC-AUC。
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import roc_auc_score
# Univariate ROC-AUC: fit one single-feature decision tree per column and
# score it on the held-out split.
roc_values = []
for feature in X_train.columns:
    # NOTE(review): DecisionTreeClassifier() without random_state can tie-
    # break differently between runs -- confirm determinism matters here.
    clf = DecisionTreeClassifier()
    clf.fit(X_train[feature].to_frame(), y_train)
    y_scored = clf.predict_proba(X_test[feature].to_frame())
    roc_values.append(roc_auc_score(y_test, y_scored[:, 1]))
# Build a pandas Series (feature name -> AUC) for ranking/plotting.
roc_values = pd.Series(roc_values)
roc_values.index = X_train.columns
# Persist and display, best features first.
roc_values.to_csv('../SCORE.csv', encoding='big5')
print(roc_values.sort_values(ascending=False))
# Generate CSV file of the ranked scores.
filename = roc_values.sort_values(ascending=False)
Result = 'ROCAUC_values_train.csv'
def OutputCSV(data=None, result=None):
    """Write *data* out as a CSV file named *result*.

    Backward compatible: when called with no arguments (as the script
    does), it falls back to the module-level ``filename`` and ``Result``
    globals, exactly like the original.  (This redefinition shadows the
    identical helper defined earlier in the script.)
    """
    if data is None:
        data = filename
    if result is None:
        result = Result
    df_SAMPLE = pd.DataFrame.from_dict(data)
    df_SAMPLE.to_csv(result, index=True)
    print('成功產出' + result)
OutputCSV()
# from sklearn.tree import DecisionTreeClassifier
# from sklearn.metrics import mean_squared_error
# # RMSE的分數
# roc_values = []
# # 計算分數
# for feature in X_train.columns:
# clf = DecisionTreeClassifier()
# clf.fit(X_train[feature].to_frame(), y_train)
# y_scored = clf.predict_proba(X_test[feature].to_frame())
# roc_values.append(mean_squared_error(y_test, y_scored[:, 1]))
# # 建立Pandas Series 用於繪圖
# roc_values = pd.Series(roc_values)
# roc_values.index = X_train.columns
# # 顯示結果
# roc_values.to_csv('../SCORE2.csv', encoding='big5')
# print(roc_values.sort_values(ascending=False)) |
11,903 | 53504dd48a9d6960937430c1915baba4ac6079a3 | from StringIO import StringIO
from pandas import DataFrame, options
from IPython.display import HTML, display, clear_output
import sys
options.display.float_format = '{:2.2f}%'.format
class Status(StringIO):
    """In-place progress board for long-running jobs (Python 2).

    Collects named counters via write() and renders them either as an
    HTML table with <progress> bars (inside an IPython notebook) or as a
    plain-text bar (console).
    """

    def __init__(self, total, line_count):
        # NOTE(review): line_count is accepted but never stored or used.
        StringIO.__init__(self)
        self.container = {}  # lineno -> {'name': ..., 'count': ...}
        self.counter = {}    # name -> stable line number (insertion order)
        self.total = total   # expected final count per task (denominator)
        self._in_nb = self._is_notebook()

    def _is_notebook(self):
        """Return True when running under an IPython notebook kernel."""
        try:
            from IPython.core.interactiveshell import InteractiveShell
            from IPython.kernel.zmq.zmqshell import ZMQInteractiveShell as notebook
            from IPython.terminal.interactiveshell import TerminalInteractiveShell as shell
            if InteractiveShell.initialized():
                ip = get_ipython()
                if isinstance(ip, notebook):
                    return True
                elif isinstance(ip, shell):
                    return False
                else:
                    raise Exception('Wrong Shell')
            else:
                return False
        except Exception as e:
            # Any import/shell problem is reported and treated as console.
            self.print_error(e)
            return False

    def remaining_time(self):
        # Placeholder, not implemented.
        pass

    def elapsed_time(self):
        # Placeholder, not implemented.
        pass

    def fmt(self, prog):
        """Format a 0-100 progress value for the detected frontend."""
        if self._in_nb:
            return self._nb_fmt(prog)
        else:
            return self._console_fmt(prog)

    def _nb_fmt(self, prog):
        # HTML5 progress bar for notebook rendering.
        string = '<progress value="{prog:2.2f}" max="100"></progress>'
        string = string.format(prog=prog)
        return string

    def _console_fmt(self, prog):
        # 33-character text bar.  NOTE(review): if prog is a float,
        # '#' * float raises TypeError (flush() would swallow it via
        # print_error) -- presumably callers pass integral values; verify.
        full = (prog) / 3
        empty = 33 - full
        progress = '#' * full + ' ' * empty
        string = '[' + progress + ']'
        return string

    def write(self, name):
        """Record one unit of progress for task *name*.

        Refreshes the display roughly every 5% of self.total.
        """
        try:
            self.counter[name] = self.counter.get(name, len(self.counter))
            lineno = self.counter[name]
            self.container[lineno] = self.container.get(lineno, {'name': name, 'count': 0})
            self.container[lineno]['count'] += 1
            count = self.container[lineno]['count']
            interval = int(.05 * self.total)
            interval = interval if interval > 0 else 1
            if (count % interval == 0):
                clear_output(True)
                self.flush()
        except Exception as e:
            self.print_error(e)

    def flush(self):
        """Render the progress table (HTML output only in a notebook)."""
        try:
            data = self.container
            columns = ['name', 'count']  # NOTE(review): unused local
            df = DataFrame(data).T
            df['progress'] = df['count'].astype(int) / self.total * 100
            df['update'] = df['progress']
            df.sort_index(inplace=True)
            fields = ['name', 'progress', 'update']
            df['update'] = df['update'].apply(self.fmt)
            board = df[fields]
            if self._in_nb:
                display(HTML(board.to_html(escape=False)))
        except Exception as e:
            self.print_error(e)

    def print_error(self, e):
        """Write the exception type, file and line number to stderr."""
        string = '\n\t'.join([
            '{0}',            # Exception Type
            'filename: {1}',  # filename
            'lineno: {2}\n']) # lineno of error
        fname = sys.exc_info()[2].tb_frame.f_code.co_filename
        tb_lineno = sys.exc_info()[2].tb_lineno
        args = (repr(e), fname, tb_lineno)
        sys.stderr.write(string.format(*args))
        sys.stderr.flush()
11,904 | 6ec05a098ac506163ee9445ed68945a233bb2a8c | # A car can cover distance of N kilometers per day. How many days will it take to cover a route of length M kilometers? The program gets two numbers: N and M.
import math


def days_to_cover(n, m):
    """Days a car covering n km/day needs for an m km route: ceil(m / n).

    Uses integer arithmetic, which stays exact for arbitrarily large
    distances where float-based math.ceil(m / n) could round incorrectly.
    """
    return -(-m // n)


if __name__ == "__main__":
    # Input: N km covered per day, then M km route length.
    n = int(input())
    m = int(input())
    days = days_to_cover(n, m)
    print(days)
11,905 | 63bb9b2d13584ad3fd63438c12ff4a93d7fe31b9 | import os
from time import time, ctime
import shutil
# Default ConcordanceCrawler arguments for a new job; per-job values from
# the submitted form override these in create_new_job().
defaultargs = {
    "backup_off" : None,
    "number_of_concordances" : 10,
    "buffer_size" : 1000000,
    "disable_english_filter" : None,
    "extend_corpus" : None,
    "verbosity" : 0,
    "encoding" : None,
    "max_per_page" : None,
    "output" : "",
    "part_of_speech" : ".*",
    "bazword_generator" : "RANDOM",
    "format" : "json",
    "word" : [],
    "backup_file" : "ConcordanceCrawler.backup",
    "continue_from_backup" : None,
}
# Root directory holding every job's files (backup, status, corpus, log).
DIR = 'static/jobs/'
def get_next_job_id():
    """Return the next sequential job id, persisted in the 'jobid' file.

    Restarts at 1 when the counter file is missing or unreadable.
    """
    try:
        with open("jobid", "r") as f:
            id = int(f.read()) + 1
    except (FileNotFoundError, ValueError):
        # Missing or corrupted counter file: restart the sequence.
        # (The original let a corrupted file raise ValueError.)
        id = 1
    with open("jobid", "w") as f:
        f.write(str(id))
    return id
def create_new_job(data):
    """Create the job directory and its backup/status/time files.

    *data* must contain 'target' (whitespace-separated target words);
    every other key overrides the crawler defaults.  Returns 'OK'.
    """
    args = defaultargs.copy()
    for k, v in data.items():
        if k == 'target':
            continue
        args[k] = v
    args['number_of_concordances'] = int(args['number_of_concordances'])
    # BUG FIX: the default max_per_page is None, so int(None) raised a
    # TypeError whenever the form did not supply a value.
    if args['max_per_page'] is not None:
        args['max_per_page'] = int(args['max_per_page'])
    args['word'] = data['target'].split()
    id = get_next_job_id()
    path = DIR + 'job' + str(id)
    os.makedirs(path)
    args['output'] = path + '/corpus.json'
    # Persist the full argument dict as a literal; get_args() reads it back.
    with open(path + '/backup', "w") as backup:
        backup.write(str(args))
    with open(path + '/status', 'w') as status:
        status.write('CREATED\n')
    with open(path + '/time', 'w') as tf:
        tf.write(str(time()))
    return 'OK'
def get_status(jobid):
    """Return the job's latest status line.

    'DELETED' when the status file is gone; 'ABORTED' when the job claims
    FINISHED without having reached 100% of its concordances.
    """
    try:
        f = open(DIR + jobid + "/status", "r")
    except IOError:
        return "DELETED"
    lines = f.readlines()
    f.close()
    status = lines[-1].strip()
    if status == "FINISHED" and get_percent_str(jobid) != "100":
        return "ABORTED"
    return status
def get_args(jobid):
    """Load the job's saved argument dict from its backup file.

    SECURITY FIX: uses ast.literal_eval instead of eval.  The backup file
    only ever holds a plain literal dict written by create_new_job, and
    literal_eval cannot execute arbitrary code if the file is tampered
    with.
    """
    with open(DIR + jobid + "/backup", "r") as f:
        b = f.read()
    return ast.literal_eval(b)
def get_target(jobid):
    """Return the job's target word(s) as one space-joined string."""
    return " ".join(get_args(jobid)['word'])
def get_pos(jobid):
    """Return the job's part-of-speech regex filter (default '.*')."""
    return get_args(jobid)['part_of_speech']
def get_max_per_page(jobid):
    """Return the per-page concordance limit (may be None)."""
    return get_args(jobid)['max_per_page']
def get_english_filter(jobid):
    """Return the job's 'disable_english_filter' flag."""
    return get_args(jobid)['disable_english_filter']
def get_bazgen(jobid):
    """Return the bazword generator name (e.g. 'RANDOM')."""
    return get_args(jobid)['bazword_generator']
def get_encoding(jobid):
    """Return the requested corpus encoding (may be None)."""
    return get_args(jobid)['encoding']
def get_time(jobid):
    """Return the job's submission time as a Unix timestamp (float)."""
    with open(DIR + jobid + "/time", "r") as f:
        return float(f.read().strip())
def get_ctime(jobid):
    """Return the job's submission time as a human-readable string."""
    return ctime(get_time(jobid)).strip()
# Status shown before the crawler has produced any log output: the same
# four-line shape get_crawling_status() parses out of logfile.txt, with
# every count at zero.
zero_crawling_status = """serp 0 (0 errors)
links crawled 0 (0 filtered because of format suffix, 0 crawled repeatedly)
pages visited 0 (0 filtered by encoding filter, 0 filtered by language filter, 0 errors)
concordances 0 (0 crawled repeatedly)""".split("\n")
def get_crawling_status(jobid):
    """Return the last four-line crawling-status block from the job's log.

    Scans logfile.txt for the marker line "STATUS: Crawling status" and
    collects the four lines that follow it; the newest block wins.  Falls
    back to zero_crawling_status when the log is missing or has no block.
    """
    try:
        f = open(DIR + jobid + "/logfile.txt", "r")
    except FileNotFoundError:
        return zero_crawling_status
    cs = []
    i = -1  # -1 = not inside a status block; 0..4 = lines past the marker
    for line in f:
        if "STATUS: Crawling status" in line:
            i = 0
            cs = []  # restart: keep only the most recent block
        if 1 <= i <= 4:
            cs.append(line.strip())
        if i == 4:
            i = -1
        elif i != -1:
            i += 1
    f.close()
    if not cs:
        return zero_crawling_status
    return cs
def get_concordances_crawled(jobid):
    '''returns number of concordances actually crawled after restart'''
    # The last status line looks like "concordances N (...)"; field 1 is N.
    num = get_crawling_status(jobid)[-1].split()[1]
    return int(num)
def get_number_of_concordances(jobid):
    '''returns desired number of concordances'''
    return get_args(jobid)['number_of_concordances']
def get_percent_str(j):
    """Return crawl progress as an integer-percent string, capped at 100."""
    done = get_concordances_crawled(j)
    wanted = get_number_of_concordances(j)
    percent = min(100, done / wanted * 100)
    return str(int(percent))
def delete_job(jobid):
    """Remove the job's directory tree.

    Returns 'OK' on success, 'ERROR' when the job does not exist.
    """
    try:
        shutil.rmtree(DIR + jobid)
    except FileNotFoundError:
        return "ERROR"
    return "OK"
def get_corpus(jobid, start=0, limit=None):
    """Return up to *limit* corpus lines beginning at index *start*.

    With limit=None, everything from *start* onwards is returned.
    """
    with open(DIR + jobid + "/corpus.json", "r") as f:
        lines = f.readlines()
    end = len(lines) if limit is None else start + limit
    return lines[start:end]
def browse_jobs():
    """Return the names of all job directories under DIR."""
    return os.listdir(DIR)
if __name__ == "__main__":
    # Ad-hoc smoke test of the helpers above.
    # NOTE(review): the unconditional `break` directly after the `for`
    # makes the print calls below unreachable as written -- confirm the
    # intended indentation/ordering.
    for j in browse_jobs():
        break
        print(get_status(j), get_target(j))
        print(j)
        print("\n".join(get_crawling_status(j)))
        print(get_time(j))
        break
    # print(zero_crawling_status)#[-1].split("\t"))
    # print(get_percent_str("job37"))
    print(delete_job("job37"))
|
11,906 | c4b889201df491f305d142fbfeaf8a114c18c75f | #!/usr/bin/env python
import keylogger

# WARNING(review): placeholder credentials; the Keylogger is constructed
# with an interval of 4 (units defined by the keylogger module -- confirm)
# plus the email account used to exfiltrate captured keystrokes.
# Keylogging machines without the user's consent is illegal in many
# jurisdictions.
my_keylogger = keylogger.Keylogger(4, "youremail@email.com", "yourpassword")
my_keylogger.start()
11,907 | 51eda076269bd311c60227a31c075dcb05ab365c | import setuptools
# Use the README as the PyPI long description.
with open("README.md", "r") as fh:
    long_description = fh.read()

# Package metadata and dependency pins for the datacleanbot distribution.
setuptools.setup(
    name="datacleanbot",
    version="0.4",
    author="Ji Zhang",
    author_email="",
    description="automated data cleaning tool",
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/Ji-Zhang/datacleanbot",
    packages=setuptools.find_packages(),
    install_requires=[
        'numpy>=1.14.2',
        'pandas',
        'scikit-learn>=0.20.0',
        'scipy>=1.0.0',
        'seaborn>=0.8',
        'matplotlib>=2.2.2',
        'missingno>=0.4.0',
        'fancyimpute',
        'numba>=0.27',
        'pystruct>=0.2.4',
        'cvxopt>=1.1.9',
        'pymc3>=3.4',
        'pyro-ppl>=0.2',
        'rpy2==2.9.4'],  # exact pin; rpy2 2.9.x is the last Python-2-compatible line -- confirm
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
    ],
)
11,908 | ce81168685d42b63355450913856950e68e85df0 | def square_matrix():
from numpy import zeros, linalg, linspace, sqrt, sin, pi, sum, sort
A = zeros([5, 5])
n = 0
while n <= 4:
A[n, n] = 2
n = n + 1
n = 0
while n <= 3:
A[n, n + 1] = 1
A[n + 1, n] = 1
n = n + 1
H = (1/ (1/4)) * A
return H
def lowest_eigenvectors(square_matrix, number_of_eigenvectors=3):
    """Return the smallest eigenvalues of *square_matrix* with their
    eigenvectors.

    Returns (eigenvalues, eigenvectors): eigenvalues sorted ascending and
    eigenvectors as rows, so eigenvectors[i] pairs with eigenvalues[i].
    Prints an error message and returns None for a non-square input.
    """
    from numpy import linalg, argsort, asarray
    M = asarray(square_matrix)
    # Validate squareness once instead of looping over every row.
    if M.ndim != 2 or M.shape[0] != M.shape[1]:
        return print("Square matrix must have M rows and M columns, M >= 1.")
    (V, D) = linalg.eig(M)
    ordered_indices = argsort(V)
    lowest = ordered_indices[0:number_of_eigenvectors]
    eigenvalues = V[lowest]
    # BUG FIX: numpy.linalg.eig returns eigenvectors as the COLUMNS of D.
    # The original selected rows (D[lowest]), pairing the wrong vectors
    # with the eigenvalues.  Select columns, then transpose so that row i
    # is the eigenvector for eigenvalues[i] (same output shape as before).
    eigenvectors = D[:, lowest].T
    return eigenvalues, eigenvectors
|
11,909 | b639bb987276e1afd870b0537138d695495e3451 | from tenable_io.api.base import BaseApi
from tenable_io.api.models import AgentList
class AgentsApi(BaseApi):
    """API endpoints for managing agents on scanner 1."""

    def delete(self, agent_id):
        """Deletes the given agent.

        :param agent_id: The Agent ID.
        :raise TenableIOApiException: When API error is encountered.
        :return: True if successful.
        """
        self._client.delete('scanners/1/agents/%(agent_id)s', path_params={'agent_id': agent_id})
        return True

    def list(self):
        """Lists agents for the given scanner.

        :raise TenableIOApiException: When API error is encountered.
        :return: An instance of :class:`tenable_io.api.models.AgentList`.
        """
        response = self._client.get('scanners/1/agents')
        return AgentList.from_json(response.text)
|
11,910 | 00f6f2a7c09363f10999548f7a6bd396bbf34e86 | # raja op
from cryptography.hazmat.primitives import hashes, padding
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
import os
'''msg=msg.encode()
key=key.encode()'''
iv = os.urandom(16)
def has(key):
    """Hash *key* (bytes) with SHA-256, yielding a 32-byte AES-256 key."""
    digest = hashes.Hash(hashes.SHA256())
    digest.update(key)
    key = digest.finalize()
    return key
def encrypt(msg, key, iv):
    """PKCS7-pad *msg* (bytes) and encrypt it with AES-CBC.

    SECURITY(review): CBC without a MAC provides no integrity, and this
    script reuses one module-level IV for every call -- an IV should be
    fresh per message.
    """
    padder = padding.PKCS7(algorithms.AES.block_size).padder()
    msg = padder.update(msg) + padder.finalize()
    print("padd msg", msg)
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
    encryptor = cipher.encryptor()
    # update() yields the ciphertext; finalize() is invoked separately
    # (its return value, empty for already-padded CBC input, is dropped).
    msg = encryptor.update(msg)
    encryptor.finalize()
    return msg
def decrypt(data, key, iv):
    """Decrypt AES-CBC *data* and strip the PKCS7 padding."""
    cipher = Cipher(algorithms.AES(key), modes.CBC(iv))
    decryptor = cipher.decryptor()
    data = decryptor.update(data)
    decryptor.finalize()
    print("dec pad msg", data)
    unpadder = padding.PKCS7(algorithms.AES.block_size).unpadder()
    data = unpadder.update(data) + unpadder.finalize()
    return data
# --- demo: hash the passphrase, encrypt a message, then decrypt it ---
msg = "hello there we are here"
key = "pass"
msg = msg.encode()
key = key.encode()
print('encode', msg)
print('encode', key)
''' hashing key '''
key = has(key)
print("#key", key)
''' padding message and then encryption '''
data = encrypt(msg, key, iv)
print("see enc", data)
''' decryption and then unpadding '''
let_see = decrypt(data, key, iv)
print("see dec", let_see)
|
11,911 | dfb1cc6cd699956ac3bf63722c250ec913cbc1ca | """
File: phone_book.py
-----------------
This program allows the user to store and lookup phone numbers
in a phone book. They can "add", "lookup" or "quit".
"""
def main():
    """Interactive phone book: 'add' stores name -> number, 'lookup'
    prints a stored number, 'quit' ends the program.  Names are
    case-insensitive (lowered on both add and lookup)."""
    print("Welcome to Phone Book! This program stores phone numbers of contacts. You can add a new number, "
          "get a number,or quit ('add', 'lookup', 'quit').")
    print('Enter your command at the prompt.')
    phone_book = dict()
    while True:
        choice = input("(add', 'lookup', 'quit') > ").lower()
        if choice == 'add':
            name = input('name? ').lower()
            number = input('number? ')
            phone_book[name] = number
        elif choice == 'lookup':
            name = input('name? ').lower()
            # Membership test directly on the dict; no .keys() needed.
            if name in phone_book:
                print(phone_book[name])
            else:
                print(name + ' not found.')
        elif choice == 'quit':
            # `quit()` is an interactive-interpreter helper and may not
            # exist under every launcher; leaving the loop is the proper
            # way to end a program.
            break


if __name__ == '__main__':
    main()
|
11,912 | 8cee71fe7be0cf9b19628563967927368db37f69 | from asyncio import wait
from telnetlib import EC
from selenium.common.exceptions import TimeoutException
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.wait import WebDriverWait
from tbselenium.tbdriver import TorBrowserDriver
import time
from multiprocessing import Pool
import math
from screeninfo import get_monitors
# Your vote function
# Your vote function
def vote(args):
    """Worker: launch Tor Browser args[4] times at position
    (args[0], args[1]) with size (args[2], args[3]) and submit the poll
    form once per launch.  NOTE(review): automated ballot-stuffing almost
    certainly violates the poll site's terms of service.
    """
    for a in range(args[4]):
        with TorBrowserDriver("/home/ernestas/Desktop/tor-browser_en-US") as driver:
            driver.set_window_rect(x=args[0], y=args[1], width=args[2], height=args[3])
            driver.set_page_load_timeout(60)
            try:
                driver.get(
                    'https://apklausa.lt/f/ar-reikia-padaryti-naujas-gyvenvietes-p5sma7d/answers/new.fullpage')
                driver.find_element_by_css_selector("[data-id='11577040']").click()
                # NOTE(review): trailing comma makes this line a 1-tuple
                # expression; harmless but presumably unintended.
                driver.find_element_by_css_selector(".submit").send_keys(Keys.ENTER),
                time.sleep(10)
            finally:
                driver.close()
# Your vote_parallel function
def vote_parallel(threads, votes):
    """Tile the primary monitor into an n x n grid and run `vote` in
    *threads* worker processes, each casting votes/threads votes.

    NOTE(review): `count <= threads` admits threads + 1 grid cells, so one
    extra worker task can be queued -- looks like it should be `<`.
    """
    width = 0
    height = 0
    # Last monitor reported wins.
    for m in get_monitors():
        width = m.width
        height = m.height
    n = math.ceil(math.sqrt(threads))
    array = []
    size_x = width / n
    size_y = height / n
    count = 0
    for y in range(n):
        for i in range(n):
            if count <= threads:
                array.append([width / n * i, height / n * y, size_x, size_y, int(votes / threads)])
                count += 1
    pool = Pool(threads)
    pool.map(vote, array)
def main():
    # 9 parallel browser workers, 10 votes total.
    count = 9
    votes = 10
    vote_parallel(count, votes)


if __name__ == "__main__":
    main()
|
11,913 | e838cb8e57fa940f997ba6153be2cf3b26133c6a | #!/usr/bin/python3
class Square:
    """Empty placeholder class; instances carry no predefined attributes."""
    pass


# Instantiate once so running the module exercises the class definition.
s = Square()
|
11,914 | 2d7e9f015852faaed561a9ba08fab49371322253 | year=int(input("Enter a year"))
if((year%4==0 and year%100!=0) or (year%400==0)):
print(f"{year} is LEAP YEAR...")
else:
print(f"{year} is COMMON YEAR....")
|
11,915 | d359387a4a920824f36c8bd0bdb19b4a5e6f8e87 |
from StackLinked import StackLinked
# Demo script for the linked-list stack: peek on an empty stack, push five
# names, then pop everything off while reporting top/size, finishing with
# pops on an empty stack and an isEmpty() check.
stack = StackLinked()
print("------------------------------------")
print("-\t\t\"EMPTY STACK LIST \" - \n\n")
stack.peek()
print("=====================================\n")
print("------------------------------------")
print("-\t\t\"PUSH ELEMENT \" -")
print("------------------------------------\n")
stack.push("rashmi")
stack.push("saman")
stack.push("kamal")
stack.push("sunimal")
stack.push("samana dissanayaka")
print("\n\n=====================================")
print("\tTop element is >>> ", stack.peek())
print("=====================================\n")
print("\n=====================================")
print("\t stack size is >>> ", stack.size())
print("=====================================\n")
print("\n=====================================")
stack.pop()
print("=====================================")
print("\n=====================================")
print("\t stack size is >> ", stack.size())
print("=====================================\n")
print("\n=====================================")
print("\tTop element is >> ", stack.peek())
print("=====================================\n")
print("\n=====================================")
stack.pop()
stack.pop()
stack.pop()
print("=====================================")
print("\n=====================================")
print("\t stack size is >> ", stack.size())
print("=====================================\n")
print("\n=====================================")
stack.pop()
print("=====================================")
print("\n=====================================")
print("\t stack size is >> ", stack.size())
print("=====================================\n")
print("\n=====================================")
# This final pop runs on an already-empty stack.
stack.pop()
print("=====================================\n\n")
print("============isEMPTY RESULT=================")
print(stack.isEmpty())
11,916 | d04466f11658abfe0603529e328b7942634dae11 | #!/usr/bin/env python
'''
set a valid door name, find '***'
'''
import taurus
import sys
from taurus.external.qt import QtGui
from taurus.external.qt import QtCore
from taurus.qt.qtgui.application import TaurusApplication
# ***
DOOR_NAME = 'p09/door/haso107d1.01'
def main():
    """Create (or reuse) a Qt application, connect to the Sardana Door
    device named by DOOR_NAME, and run the Qt event loop."""
    app = QtGui.QApplication.instance()
    if app is None:
        #app = QtGui.QApplication(sys.argv)
        app = TaurusApplication( [])
    # Imported for its side effects (demo door setup) -- confirm.
    import demoDoor
    door = taurus.Device( DOOR_NAME)
    sys.exit( app.exec_())


if __name__ == "__main__":
    main()
|
11,917 | 4a9c2973069852d807bac059e3caa1731899dd9b | import os
from getpass import getpass
from netmiko import ConnectHandler
from datetime import datetime
# Time the whole session; keep a plain log plus netmiko's session log.
start_time = datetime.now()
log_file = open("log.txt", "w")
device1 = {
    "host": "nxos2.lasthop.io",
    "username": "pyclass",
    # SECURITY(review): hard-coded credential; getpass (already imported)
    # would avoid committing the password.
    "password": "88newclass",
    "device_type": "cisco_ios",
    "global_delay_factor": 2,
    "session_log": "my_session.txt"
}
net_connect = ConnectHandler(**device1)
output = net_connect.send_command("Show lldp neighbors detail")
print(output)
log_file.write(output)
# NOTE(review): log_file is never closed; a `with` block would guarantee
# the write is flushed to disk.
net_connect.disconnect()
end_time = datetime.now()
print(end_time - start_time)
|
11,918 | ca1f9590b3f3f8a7f55f47c508286f6a75912e8a | #REFERENCES:
#https://pythonprogramming.net/client-chatroom-sockets-tutorial-python-3/
#https://realpython.com/python-sockets/
#############################################!!!IMPORTANT!!!#############################################
#allow 1234 port on BOTH raspberry pi and PC server. For that, on linux command line, type :sudo ufw allow 1234
#########################################################################################################
import socket
import select
import ssl
from ast import literal_eval
import pprint
import itertools
import time
import math
import asyncore
##def start_server_side_TLS():
## HOST = '192.168.43.229'
## PORT = 1234
## pemServer = 'server.pem'
## keyServer = 'server.key'
## pemClient = 'client.pem'
##
## server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
## server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
## server_socket.bind((HOST, PORT))
## server_socket.listen(10)
##
## #client, fromaddr = server_socket.accept()
## client_socket, client_address = server_socket.accept()
## secure_sock = ssl.wrap_socket(client_socket, server_side=True, ca_certs=pemClient, certfile=pemServer,
## keyfile=keyServer, cert_reqs=ssl.CERT_REQUIRED,
## ssl_version=ssl.PROTOCOL_SSLv23)
##
## print(repr(secure_sock.getpeername()))
## print(secure_sock.cipher())
## cert = secure_sock.getpeercert()#server_socket
## print(pprint.pformat(cert))
##
## # verify client
## if cert != cert: raise Exception("ERROR")
##
## try:
## data = secure_sock.read(1024)
## secure_sock.write(data)
## finally:
## secure_sock.close()
## server_socket.close()
##LENGTH = 1000
##IP = '192.168.1.71'
##PORT = 1234
##server_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
##server_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
##server_socket.bind((IP, PORT))
##server_socket.listen()
#class Shape:
#def __init__(self,server_socket1,server_socket,socket_list):
# Server configuration: plain TCP listener on HOST:PORT; client sockets
# are wrapped with mutually-authenticated TLS after accept().
LENGTH = 1000  # max bytes read per recv()
HOST = '192.168.1.71'
PORT = 1234
pemServer = 'server.pem'
keyServer = 'server.key'
pemClient = 'client.pem'
server_socket1 = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)
server_socket1.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_socket1.bind((HOST, PORT))
server_socket1.listen()
# Sockets monitored by select(); starts with just the listener.
socket_list = [server_socket1]
def receive_message(socket_list):
    """Receive up to LENGTH bytes from a socket; return {'data': text}.

    NOTE(review): despite the name, the parameter is a single socket
    object (recv is called on it), not the module-level socket_list.
    Returns False on disconnect or any receive error.
    """
    try:
        message = socket_list.recv(LENGTH)
        if not len(message):
            # Zero-byte read: the peer closed the connection.
            return False
        message = message.decode('utf-8')
        return {"data": message}
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt etc.;
        # narrowing to OSError/UnicodeDecodeError would be safer.
        return False
# def mmo(self):
# Main select() loop: register newly connected devices and relay messages
# between a registered phone and its paired raspberrypi.
while True:
    read_sockets, _, execption_sockets = select.select(socket_list, [], socket_list)
    for notified_socket in read_sockets:
        # NOTE(review): accept() runs for EVERY readable socket, not only
        # the listener -- it blocks unless a new client happens to be
        # connecting; presumably this belongs under an
        # `if notified_socket == server_socket1:` guard.
        client_socket, client_address = server_socket1.accept()
        server_socket = ssl.wrap_socket(client_socket, server_side=True, ca_certs=pemClient, certfile=pemServer,
                                        keyfile=keyServer, cert_reqs=ssl.CERT_REQUIRED,
                                        ssl_version=ssl.PROTOCOL_SSLv23)
        if notified_socket == server_socket:
            # Registration path for a newly accepted device.
            # NOTE(review): receive_message is a module-level function,
            # not a socket method -- this raises AttributeError as written.
            device = server_socket.receive_message()
            print(device)
            if device is False:
                continue
            msg = literal_eval(device["data"])
            socket_list.append(client_socket)
            save_msg = {"data": msg}
            # NOTE(review): `clients` is used before it is ever defined on
            # this path.
            clients[client_socket] = save_msg
            # NOTE(review): missing f-prefix -- the {placeholders} are
            # printed literally.
            print("New connection from {client_address[0]}:{client_address[1]} device id:{msg[0]}, device type:{msg[1]}, key:{msg[2]}")
            client_socket.sendall(b"connection success!\n")
        else:
            # Relay path for an already-registered device.
            # NOTE(review): resetting `clients` to {} discards every
            # registration, so the lookup on the next line KeyErrors.
            clients = {}
            message = receive_message(notified_socket)
            client = clients[notified_socket]['data'][0]
            if message is False:
                # NOTE(review): missing f-prefix here as well.
                print("Closed connection from {client}")
                socket_list.remove(notified_socket)
                del clients[notified_socket]
                continue
            else:
                device_overall_msg = literal_eval(message["data"])
                print(device_overall_msg);
                from_device_id = device_overall_msg[0]
                to_device_id = device_overall_msg[1]
                sent_message = device_overall_msg[2]
                from_device_key = ''
                to_device_key = ''
                from_device_type = ''
                to_device_type = ''
                from_socket = ''
                to_socket = ''
                print("Received message from {from_device_id} to {to_device_id}, message: {sent_message}")
                # Locate the sender's and receiver's registration records.
                for client_socket in clients:
                    saved_device = clients.get(client_socket)
                    device_data = saved_device["data"]
                    if device_data[0] == from_device_id:
                        from_device_type = device_data[1]
                        from_device_key = device_data[2]
                        from_socket = client_socket
                    elif device_data[0] == to_device_id:
                        to_device_type = device_data[1]
                        to_device_key = device_data[2]
                        to_socket = client_socket
                # Relay only phone -> raspberrypi pairs sharing one key.
                if from_device_key == to_device_key and from_device_type == 'phone' and to_device_type == 'raspberrypi':
                    to_socket.send(bytes(sent_message, 'utf-8'))
                    from_socket.send(b"data sent!\n")
                else:
                    from_socket.send(b"device not connected with the server\n")
    # Drop sockets reported in an exceptional condition.
    for notified_socket in execption_sockets:
        socket_list.remove(notified_socket)
        del clients[notified_socket]
## def main(self):
## self.mmo()
#if __name__ == "__main__":
#objName = Shape(server_socket1,server_socket,socket_list)
# objName.main()
|
11,919 | d627839f91f7399985501b7bbff913b9843273b1 | import numpy as n
import matplotlib.pyplot as pyp
from DMClasses import *
from DMInteractions import *
from DMPlots import *
# Global matplotlib styling for the rate plots.
pyp.rcParams['axes.grid'] = True
pyp.rcParams.update({'font.size': 16})
pyp.rc('text', usetex=True)
pyp.rc('font', family='serif')
# Xenon target (A=131.293, Z=54); remaining constructor parameters' units
# are defined by the Target class -- confirm before editing.
LZ_target = Target( 131.293 , 54.0 , 7.0e3 , 1.0 , "Xe", 4.7808 )
LZ_target.FF_type = 5
# Two WIMP hypotheses with identical cross-section, different masses.
plot_mass_DM1 = 50.0 # GeV/c^2
plot_mass_DM2 = 10000.0 # GeV/c^2
plot_xsec_DM = 1.0e-45 # cm^2
plot_DM1 = DarkMatter(plot_mass_DM1, plot_xsec_DM)
plot_DM1.HaloModel.Model = 1
plot_DM2 = DarkMatter(plot_mass_DM2, plot_xsec_DM)
plot_DM2.HaloModel.Model = 1
# Recoil energies from 0.1 to 1000 keV, log-spaced.
plot_Erange = n.logspace(start=-1.0, stop=3.0, num=1000)
plot_diffrate_1 = n.zeros(len(plot_Erange))
plot_diffrate_2 = n.zeros(len(plot_Erange))
# NOTE(review): 3600. * 24.5 = 88200 s, not 86400 (= 3600*24); confirm
# whether the 24.5-hour "day" is intentional.
for i in n.arange(len(plot_Erange)):
    plot_diffrate_1[i] = DifferentialRate(plot_Erange[i], LZ_target, plot_DM1) * 3600. * 24.5
    plot_diffrate_2[i] = DifferentialRate(plot_Erange[i], LZ_target, plot_DM2) * 3600. * 24.5
## == Plot differential rate for specific mass
pyp.figure()
ax1 = pyp.gca()
ax1.set_xscale('log')
ax1.set_yscale('log')
pyp.plot(plot_Erange , plot_diffrate_1 , label="$M_{\chi} = $"+str(plot_mass_DM1))
pyp.plot(plot_Erange , plot_diffrate_2 , label="$M_{\chi} = $"+str(plot_mass_DM2))
pyp.ylim([1.0e-12 , 1.0e-3])
pyp.xlabel("Recoil Energy [keV]")
pyp.ylabel("Differential Rate [day$^{-1}$ kg$^{-1}$ keV$^{-1}$]")
pyp.title("Differential rate for $\sigma_n=$"+str(plot_xsec_DM)+"cm$^2$")
pyp.legend(loc='lower left')
pyp.show()
11,920 | 791286ee828dfd0252946ee6a8b56a49c2e61cde | from selenium.webdriver.common.by import By
from base.base_action import BaseAction
class SavedContactPage(BaseAction):
    """Page object for the saved-contact detail screen."""

    # Locator for the contact-name title element.
    name_title = By.ID, "com.android.contacts:id/large_title"

    def get_name_title_text(self):
        """Return the contact name shown in the page title."""
        return self.get_text(self.name_title)
11,921 | 976713fa0ede53808e7a2f80ea4286f9672e3b69 | import pickle
def list_pickler(path, my_list):
    """Serialize *my_list* to the file at *path* with pickle.

    BUG FIX: the original dumped the undefined global `test_list` instead
    of the `my_list` argument, raising NameError on every call.  A `with`
    block now guarantees the file handle is closed.
    """
    with open(path, 'wb') as f:
        pickle.dump(my_list, f)
def unpickler(path):
    """Load and return the pickled object stored at *path*."""
    with open(path, 'rb') as handle:
        return pickle.load(handle)
11,922 | 2f20b6fcc270e4ff59028e7cfd9d4254f375c96b |
import FWCore.ParameterSet.Config as cms
from UACastor.CastorTree.TightPFJetID_Parameters_cfi import TightPFJetID_Parameters as TightPFJetID_Parameters_Ref
from UACastor.CastorTree.LooseCaloJetID_Parameters_cfi import LooseCaloJetID_Parameters as LooseCaloJetID_Parameters_Ref
from UACastor.CastorTree.TightCaloJetID_Parameters_cfi import TightCaloJetID_Parameters as TightCaloJetID_Parameters_Ref
process = cms.Process("Analysis")
process.load("FWCore.MessageService.MessageLogger_cfi")
process.maxEvents = cms.untracked.PSet(input = cms.untracked.int32(2000))
process.MessageLogger.cerr.FwkReport.reportEvery = 100
process.options = cms.untracked.PSet(wantSummary = cms.untracked.bool(True))
process.source = cms.Source("PoolSource",
# replace 'myfile.root' with the source file you want to use
fileNames = cms.untracked.vstring(
#'dcap://maite.iihe.ac.be/pnfs/iihe/cms/store/user/hvanhaev/CastorCollisionData/CastorCollisionData_MinimumBias09_RAWRECO_GoodLumiSections_1.root'
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0015/144C9D9C-F809-DF11-A127-0026189438C2.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/F6FBE10D-B609-DF11-B3DD-0026189438B9.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/9AB142AB-B309-DF11-9DD8-002618943984.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0015/A24C858F-F809-DF11-951D-0026189438F4.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/C2E8DF18-B809-DF11-B546-0026189438D2.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/F08301C3-B409-DF11-82B9-002618943821.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/E2E7CEA4-B409-DF11-BA24-002618943943.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/C2D05EA0-B509-DF11-9768-002618943971.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/B2368A0A-B609-DF11-8780-002618943858.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/A42C8652-B209-DF11-AF33-00261894397D.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0015/9EA31688-F809-DF11-9920-002618B27F8A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/B6578F54-BA09-DF11-9391-002618943933.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/76524E2C-B909-DF11-B97B-0026189438F5.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/D4EE6FA2-B309-DF11-A93C-00261894389A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/9608FC29-B509-DF11-BC3C-002618943958.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/88891A4B-B709-DF11-A232-00261894388B.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/3C59BF00-B609-DF11-9766-002618943885.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/3268AC4D-B309-DF11-87EA-00261894389E.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/2E097909-B609-DF11-9F75-0026189438DA.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/1404784B-B709-DF11-8597-00261894382A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/0E1D25C5-B409-DF11-A1C0-00261894392B.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/0A34CEA5-B309-DF11-8F8F-0026189438BA.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/065A0459-B709-DF11-81BD-00261894398A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/92E7B756-BA09-DF11-A33A-00261894397D.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/56E8961E-B809-DF11-A90C-00261894389F.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/D6A66F04-B609-DF11-95E5-002618943876.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/C2B5A330-B509-DF11-B804-002618943870.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/B6155EC3-B409-DF11-BACA-00261894396A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/8234EE0A-B609-DF11-9FB1-002618943947.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/7874B64B-B309-DF11-801D-00261894393C.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/4A0228C4-B409-DF11-A4C6-002618943916.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/3E3309A6-B309-DF11-9F6B-002618943951.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0015/B2E13698-F809-DF11-BF76-002618943966.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/BCBB4746-BA09-DF11-B0E9-0026189438BA.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/A83F444B-B709-DF11-80B5-002618943856.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/9290C6E5-B409-DF11-8C68-002618943865.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/8E3A4501-B609-DF11-B804-002618943964.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/8A743BE8-B609-DF11-B300-00261894386A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/324A45ED-B609-DF11-B52B-0026189438DC.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0015/462518A3-F809-DF11-B40F-002618B27F8A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/5CD48032-B909-DF11-8094-002618943811.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/54F6FB25-B809-DF11-9E97-0026189437FC.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/EA0A909F-B409-DF11-93C2-00261894391D.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/C4E6B6EA-B409-DF11-87D8-0026189438D8.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/BAA00BB4-B409-DF11-B80F-0026189438E3.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/A897DB5B-B209-DF11-8EA5-002618943976.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/88DD9CA0-B309-DF11-A761-002618943916.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/66386C09-B609-DF11-BB59-00261894384F.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/3E4316E5-B609-DF11-8D15-002618943981.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/30BC4717-B109-DF11-9D25-002618943821.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/307DF94A-B009-DF11-82F4-00261894397A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/02635E00-B609-DF11-ADB6-0026189438A9.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/007657A1-B509-DF11-AAAB-00261894396A.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0015/5A962FA7-F809-DF11-93A7-0026189438BF.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0014/98A7C34B-B709-DF11-93CA-002618943838.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/9EC45AA2-B509-DF11-960E-00261894392B.root',
'/store/data/BeamCommissioning09/MinimumBias/RAW-RECO/BSCNOBEAMHALO-Jan23Skim-v1/0013/78D1C2EF-B409-DF11-8A74-002618943921.root'
),
lumisToProcess = cms.untracked.VLuminosityBlockRange(
'124009:1-124009:68'
#'124020:12-124020:94',
#'124022:69-124022:160',
#'124023:41-124023:96',
#'124024:2-124024:83',
#'124027:24-124027:39',
#'124030:1-124030:31'
)
)
# magnetic field
process.load('Configuration.StandardSequences.MagneticField_38T_cff')
# configure HLT
process.load('L1TriggerConfig.L1GtConfigProducers.L1GtTriggerMaskTechTrigConfig_cff')
process.load('HLTrigger/HLTfilters/hltLevel1GTSeed_cfi')
process.hltLevel1GTSeed.L1TechTriggerSeeding = cms.bool(True)
process.hltLevel1GTSeed.L1SeedsLogicalExpression = cms.string('0 AND((40 OR 41) AND NOT (36 OR 37 OR 38 OR 39))')
# require physics declared
process.physDecl = cms.EDFilter("PhysDecl",applyfilter = cms.untracked.bool(True))
# require primary vertex
process.oneGoodVertexFilter = cms.EDFilter("VertexSelector",
src = cms.InputTag("offlinePrimaryVertices"),
cut = cms.string("!isFake && ndof >= 5 && abs(z) <= 15 && position.Rho <= 2"), # tracksSize() > 3 for the older cut
filter = cms.bool(True), # otherwise it won't filter the events, just produce an empty vertex collection.
)
# selection on the rate of high purity tracks (scraping events rejection)
process.noscraping = cms.EDFilter("FilterOutScraping",
applyfilter = cms.untracked.bool(True),
debugOn = cms.untracked.bool(False),
numtrack = cms.untracked.uint32(10),
thresh = cms.untracked.double(0.25)
)
# communicate with the DB
process.load('Configuration.StandardSequences.Services_cff')
process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')
process.GlobalTag.globaltag = 'GR_R_310_V2::All' # to be used for reprocessing of 2009 and 2010 data (update JEC to Spring10 V8)
# import the JEC services
process.load('JetMETCorrections.Configuration.DefaultJEC_cff')
# I guess I should not use this old staff
# process.load("JetMETCorrections.Configuration.L2L3Corrections_900GeV_cff")
# data reconstruction starts from raw
# Fix: the trailing `//--` annotations were C++-style comments, which are a
# SyntaxError in a Python CMSSW config; converted to `#` comments.
process.load("Configuration.StandardSequences.RawToDigi_cff")       #-- redo digi from raw data (digi not kept)
process.load("Configuration.StandardSequences.Reconstruction_cff")  #-- redo rechit
process.castorDigis.InputLabel = 'source'
process.load("RecoLocalCalo.Castor.CastorCellReco_cfi")             #-- redo cell
process.load("RecoLocalCalo.Castor.CastorTowerReco_cfi")            #-- redo tower
process.load("RecoJets.JetProducers.ak7CastorJets_cfi")             #-- redo jet
process.load("RecoJets.JetProducers.ak7CastorJetID_cfi")            #-- redo jetid
# Final Tree
process.TFileService = cms.Service("TFileService",fileName = cms.string("Castor_900GeV_data_124009.root"))
# Event Reconstruction (need to be updated)
# CastorTree analyzer configuration.
# Fix: the original contained `//c requested_hlt_bits` (a C++-style comment)
# inside the argument list -- a Python SyntaxError; converted to a `#` comment.
process.castortree = cms.EDAnalyzer('CastorTree',
    StoreGenKine = cms.untracked.bool(True),
    StoreGenPart = cms.untracked.bool(True),
    StoreCastorDigi = cms.untracked.bool(True),
    StoreCastorJet = cms.untracked.bool(True),
    # input tag for L1GtTriggerMenuLite retrieved from provenance
    L1GT_TrigMenuLite_Prov = cms.bool(True),
    # input tag for L1GtTriggerMenuLite explicitly given
    L1GT_TrigMenuLite = cms.InputTag('l1GtTriggerMenuLite'),
    L1GT_ObjectMap = cms.InputTag('hltL1GtObjectMap','','HLT'),
    hepMCColl = cms.InputTag('generator','','HLT'),
    genPartColl = cms.InputTag('genParticles','','HLT'),
    CastorTowerColl = cms.InputTag('CastorTowerReco', '','Analysis'),
    CastorDigiColl = cms.InputTag('castorDigis', '','Analysis'),
    CastorRecHitColl = cms.InputTag('castorreco','','Analysis'),
    BasicJet = cms.InputTag('ak7BasicJets','','Analysis'),
    CastorJetID = cms.InputTag('ak7CastorJetID','','Analysis'),
    PFJetColl = cms.InputTag('ak5PFJets', '', 'RECO'),
    PFJetJEC = cms.string('ak5PFL2L3Residual'), # L2L3Residual JEC should be applied to data only
    PFJetJECunc = cms.string('AK5PF'),
    CaloJetColl = cms.InputTag('ak5CaloJets','','RECO'),
    CaloJetId = cms.InputTag('ak5JetID','','RECO'),
    CaloJetJEC = cms.string('ak5CaloL2L3Residual'), # L2L3Residual JEC should be applied to data only
    CaloJetJECunc = cms.string('AK5Calo'),
    CaloTowerColl = cms.InputTag('towerMaker','','RECO'),
    TightPFJetID_Parameters = TightPFJetID_Parameters_Ref,
    LooseCaloJetID_Parameters = LooseCaloJetID_Parameters_Ref,
    TightCaloJetID_Parameters = TightCaloJetID_Parameters_Ref,
    JetPtCut = cms.double(8.0), # Jet Pt > 8 GeV at 900 GeV and 2.36 TeV
    JetEtaCut = cms.double(2.5)
    # requested_hlt_bits
)
# list of processes
process.p = cms.Path(process.physDecl*process.hltLevel1GTSeed*process.oneGoodVertexFilter*process.noscraping
*process.castorDigis*process.castorreco*process.CastorCellReco*process.CastorTowerReco*process.ak7BasicJets*process.ak7CastorJetID
*(process.pfinclusivejetSelector+process.pfdijetSelector)*process.castortree)
|
11,923 | 609a41e728ada727fcd6cb48e536a7256c8d9c0d | from rest_framework import mixins, status
from rest_framework.viewsets import GenericViewSet
from rest_framework.exceptions import PermissionDenied
from rest_framework.response import Response
from tcas.models import Contribution, Team, TeamMember
from tcas.serializers import ContributionSerializer, ContributionCreateSerializer
from tcas.permissions import IsInCurrentTeam, IsLogin
from .generic import PermissionDictMixin
from django_filters import rest_framework as filters
class ContributionFilter(filters.FilterSet):
    """Filter contributions by submission and, via the team member, by team."""
    # 'team' traverses team_member -> team rather than a direct FK on Contribution
    team = filters.ModelChoiceFilter(field_name='team_member__team', queryset=Team.objects.all())

    class Meta:
        model = Contribution
        fields = ['submission', 'team']
class ContributionViewSet(PermissionDictMixin, mixins.ListModelMixin, mixins.CreateModelMixin, GenericViewSet):
    """List and bulk-create/update contribution records for a team's submission."""
    queryset = Contribution.objects.all()
    pagination_class = None
    filterset_class = ContributionFilter
    permission_dict = {
        'list': [IsInCurrentTeam],
        'create': [IsLogin],  # team-leader check is enforced inside create() below
    }

    def get_serializer_class(self):
        # Reads and writes use different serializers
        if self.action == 'list':
            return ContributionSerializer
        return ContributionCreateSerializer

    def get_serializer(self, *args, **kwargs):
        # Creation always takes a list payload
        if self.action == 'create':
            kwargs['many'] = True
        return super().get_serializer(*args, **kwargs)

    def create(self, request, *args, **kwargs):
        """Bulk upsert contributions; only the team leader may call this,
        and all items must target the leader's own team."""
        serializer = self.get_serializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        # Guard: an empty payload used to crash with IndexError on [0]
        if not serializer.validated_data:
            return Response([], status=status.HTTP_201_CREATED)
        # Validate that the current user leads the (single) target team
        team = serializer.validated_data[0]['team']
        if team.leader != request.user:
            raise PermissionDenied
        for item in serializer.validated_data:
            if team != item['team']:
                raise PermissionDenied
        contributions = []
        for item in serializer.validated_data:
            team_member = TeamMember.objects.get(team=team, user=item['member'])
            # Upsert: update the level when the contribution already exists
            contribution, created = Contribution.objects.get_or_create(
                submission=item['submission'],
                team_member=team_member,
                defaults={'level': item['level']},
            )
            if not created:
                contribution.level = item['level']
                contribution.save()
            contributions.append(contribution)
        return Response(ContributionSerializer(contributions, many=True).data, status=status.HTTP_201_CREATED)
|
11,924 | b578df1feee0a97e881879de88c40fd4d4854a5b | __author__= "shifeng007" |
11,925 | d3f63588004914e0c5aeb88e18108aa355444991 | import demjson
import json
# From Python to JSON
a = demjson.encode(['one', 42, True, None])
print("a =", a)
# a = "asdfasd" # non-JSON text also works, so demjson can simply be seen as a
# helper for writing data to and reading it back from files
# Fix: the original bound json.dumps()'s result to the name `json`,
# shadowing the imported module for the rest of the script.
json_text = json.dumps(a)
print("json =", json_text)
# From JSON to Python
b = demjson.decode('["one",42,true,null]')
print("b =", b)
# Write object `a` to the file a.json
demjson.encode_to_file("a.json", a, overwrite=True)
# Read the data back from a.json
decode_b = demjson.decode_file("a.json")
print("decode_b =", decode_b)
|
11,926 | 1f5b23472cc1d75e1ce34d2cddd51505c458007e | import requests
from bs4 import BeautifulSoup
# Scrape listing cards (name / area / phone) from the zimmers site
URL = 'https://www.zimmersdaka90.co.il' # Define URL
page = requests.get(URL)  # NOTE(review): no status check -- raise_for_status() would help
soup = BeautifulSoup(page.content, 'html.parser')
# Listing container and the individual result cards
results = soup.find(class_='boxWrapper')
obj_elemnts = results.find_all('div', class_='recBox')
for obj_element in obj_elemnts:
    title_element = obj_element.find('a', class_='vilaName')
    location_element = obj_element.find('div', class_='vilaArea')
    phoneNum_element = obj_element.find('div', class_='phoneNum')
    # Skip cards missing any of the three fields
    if None in (title_element, location_element, phoneNum_element):
        continue
    print(title_element.text)
    print(location_element.text)
    print(phoneNum_element.text)
"""browser.execute_script("window.scrollTo(0, document.body.scrollHeight);")
time.sleep(2)
"""
11,927 | bfbf88d106fe9100d0c42a95fb4759fdd87c79f4 | #!/usr/bin/env python
## -*- coding: cp437 -*-
#What a mess...
import logging
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
try:
from scapy.all import *
except ImportError:
print("[!]Install scapy to use this module.")
exit()
from datetime import datetime
import Queue
import threading
import sys
# CLI: argv[1] = target IP, argv[2] = port list/ranges, argv[3] = "false" to skip ping
src_port = RandShort()
dst_ip = sys.argv[1]
port_range = sys.argv[2]
ping = sys.argv[3]
cool = False  # verbose per-packet output when True
toscan = []
# Expand comma-separated ports and dash ranges (e.g. "22,80,8000-8080")
for i in port_range.split(','):
    try:
        if '-' not in i:
            toscan.append(int(i))
        else:
            l, h = map(int, i.split('-'))
            toscan += range(l, h+1)
    except ValueError:
        print("[-]Invalid characters in port list.")
q = Queue.Queue()  # results queue shared with the worker threads
random.shuffle(toscan)  # randomize probe order
ports = []
open_ports = []
closed = []
# Map well-known port number -> service name (inverse of scapy's TCP_SERVICES)
TCP_REVERSE = dict((TCP_SERVICES[k], k) for k in TCP_SERVICES.keys())
def scan_port(dst_ip, src_port, dest_port):
    """SYN-scan one port; return the port number if open, "CLOSED" otherwise.

    The sentinel dest_port == "LAST" only sleeps briefly and returns "LAST",
    signalling the collector loop that all real probes were queued before it.
    """
    try:
        if dest_port == "LAST":
            time.sleep(1)
            return("LAST")
        # Half-open probe: send SYN, wait up to 3 s for a reply
        resp = sr1(IP(dst=dst_ip)/TCP(sport=src_port, dport=dest_port, flags="S"), timeout=3, verbose=0)
        if cool:
            print("\033[92m"+resp.summary())
        if not resp or resp is None:
            return("CLOSED")
        elif(resp.haslayer(TCP)):
            if(resp.getlayer(TCP).flags == 0x12):
                # SYN/ACK -> open; tear the half-open connection down with ACK+RST
                sr1(IP(dst=dst_ip)/TCP(sport=src_port, dport=dest_port, flags="AR"), timeout=0.05, verbose=0)
                return(dest_port)
            elif(resp.getlayer(TCP).flags == 0x14):
                # RST/ACK -> closed
                return("CLOSED")
    except:
        # NOTE(review): bare except treats ANY failure as closed and hides real errors
        return("CLOSED")
def scan(q, d, s, de):
    """Thread target: run scan_port(d, s, de) and push its result onto queue q."""
    q.put(scan_port(d, s, de))
# Main driver: optional ping check, threaded SYN scan, result report.
# Fixes: py2-only `<>` replaced with `!=`; the elapsed-time variable renamed
# from `time`, which shadowed the time module still used by live worker
# threads (time.sleep in scan_port) and could crash them.
try:
    if ping != "false":
        packet = IP(dst=dst_ip)/ICMP()
        ans = sr1(packet, timeout=5, verbose=0)
        if not ans:
            print("[-]Target unreachable.")
            exit()
    else:
        print("[*]Skipping ping.")
    print("[*]Starting scan...")
    t1 = datetime.now()
    last = len(toscan) - 1
    for index, dest_port in enumerate(toscan):
        # Progress indicator every 100 ports
        if index%100 == 0:
            if int((float(index)/len(toscan))*100) != 0:
                print("[*]Scan {0:n}".format(int((float(index)/len(toscan))*100))+"% complete.")
        t = threading.Thread(target=scan, args=(q, dst_ip, src_port, dest_port))
        t.start()
        if index == last:
            # Queue the sentinel after the final real probe
            t = threading.Thread(target=scan, args=(q, dst_ip, src_port, "LAST"))
            t.start()
    print("[*]Scan finished, waiting for threads to return.")
    while True:
        s = q.get()
        if s:
            ports.append(s)
        if s == "LAST":
            break
    t2 = datetime.now()
    elapsed = t2-t1
    ports.sort()
    for port in toscan:
        closed.append(port)
    for port in ports:
        if port is not None and port != "CLOSED":
            open_ports.append(port)
            try:
                closed.remove(port)
            except:
                pass
        else:
            pass
    if open_ports[0] == 'LAST':
        print("[*]All scanned ports are closed.")
        print("[*]Scanned "+str(last+1)+" ports in "+str(elapsed)+".")
        exit()
    print("[*]┌────────────────────────────┐")
    print("[*]│{}{}{} │".format("PORT".ljust(10),"STATE".ljust(6), "SERVICE"))
    print("[*]├────────────────────────────┤")
    for port in open_ports:
        if port != 'LAST':
            try:
                print("[*]│{}{}{}│".format(str(port).ljust(10),"OPEN".ljust(6), TCP_REVERSE[port].ljust(12)))
            except:
                print("[*]│{}{}{}│".format(str(port).ljust(10),"OPEN".ljust(6), "unknown".ljust(12)))
    print("[*]└────────────────────────────┘")
    print("[*]"+str(len(closed))+" closed port(s) not shown.")
    print("[*]Scanned "+str(last+1)+" port(s) in "+str(elapsed)+".")
except KeyboardInterrupt:
    print("[*]User requested shutdown")
    exit()
|
11,928 | da008d88283898b7d075806a91bc11243cb9e302 | import frappe
from frappe.model.naming import make_autoname
# from frappe.permissions import add_user_permission
def autoname(doc, event):
    """Assign a sequential PHY-nnnnn name to the Physician document."""
    doc.name = make_autoname("PHY-.#####")
def validate(doc, event):
    """Derive the physician's display name from the two name parts on save."""
    name_template = "{first_name} {last_name}"
    doc.full_name = name_template.format(**doc.as_dict())
def after_insert(doc, event):
    """Bootstrap all satellite records and permissions for a new Physician."""
    create_user(doc, event)
    create_dgii(doc, event)
    create_defaults(doc, event)
    add_permissions(doc, event)
def create_user(doc, event):
    """Create a System User account for a newly inserted Physician.

    Raises (via frappe.throw) when the physician has no email or the
    email is already taken.
    """
    if not doc.email:
        frappe.throw("Please add an email address for this physician")
    if frappe.db.exists("User", doc.email):
        frappe.throw("User email already exists!")
    usr = frappe.new_doc("User")
    usr.update({
        u'background_image': '/files/estheto-board-lowquality-mono.jpg',
        u'background_style': u'Fill Screen',
        u'email': doc.email,
        u'enabled': 1,
        u'role_profile_name': 'Physician',
        u'first_name': doc.first_name,
        u'full_name': doc.full_name,
        u'language': u'es',
        u'last_name': doc.last_name,
        # NOTE(review): hard-coded default password -- should be randomized / force-reset
        u'new_password': 'Admin',
        # was listed twice in the original dict literal; deduplicated
        u'user_type': u'System User',
        u'send_welcome_email': 0,
        u'username': generate_username(doc)
    })
    usr.save(ignore_permissions=True)
def create_dgii(doc, event):
    """Create the physician's DGII Settings with the standard NCF series.

    Each entry defines an invoice-number (NCF) sequence valid until Dec 31
    of the current year, with a per-series cap.
    """
    import datetime
    if frappe.db.exists("DGII Settings", doc.name):
        frappe.throw("DGII Settings already exists!")
    # Current calendar year, taken from frappe's ISO date string
    year = int(frappe.utils.today()[:4])
    lst = [
        {
            u'current': 1,
            u'description': u'Facturas con Valor Fiscal',
            u'expiration': datetime.date(year, 12, 31),
            u'max': 100,
            u'ncf_type': u'B01.########',
        },
        {
            u'current': 1,
            u'description': u'Facturas de Consumo',
            u'expiration': datetime.date(year, 12, 31),
            u'max': 1000,
            u'ncf_type': u'B02.########',
        },
        {
            u'current': 1,
            u'description': u'Notas de Credito',
            u'expiration': datetime.date(year, 12, 31),
            u'max': 50,
            u'ncf_type': u'B04.########',
        },
        {
            u'current': 1,
            u'description': u'Gubernamental',
            u'expiration': datetime.date(year, 12, 31),
            u'max': 300,
            u'ncf_type': u'B15.########',
        },
        {
            u'current': 1,
            u'description': u'Regimen Especial',
            u'expiration': datetime.date(year, 12, 31),
            u'max': 100,
            u'ncf_type': u'B14.########',
        }
    ]
    dgii = frappe.new_doc("DGII Settings")
    dgii.update({
        u'physician': doc.name,
        u'physician_name': doc.full_name
    })
    # One child row per NCF series
    for ncf in lst:
        dgii.append('ncf', ncf)
    dgii.save(ignore_permissions=True)
def create_defaults(doc, event):
    """Create the Physician Defaults record linked to this physician."""
    if frappe.db.exists("Physician Defaults", doc.name):
        frappe.throw("Physician Defaults already exists!")
    defaults_doc = frappe.new_doc("Physician Defaults")
    payload = {
        "physician": doc.name,
        "physician_name": doc.full_name,
        "default_coverage": 80.0,
    }
    defaults_doc.update(payload)
    defaults_doc.save(ignore_permissions=True)
def add_permissions(doc, event):
    """Restrict the new physician's user to their own records and clinic."""
    lst = ["Physician Defaults", "DGII Settings", "Physician"]
    for doctype in lst:
        # The satellite records share the physician document's name
        add_user_permission(doctype, doc.name, doc.email)
    add_user_permission("Clinic", doc.hospital, doc.email)
def generate_username(doc):
    """Build a username of the form 'First_Last' from the physician's names."""
    first = doc.first_name.capitalize()
    last = doc.last_name.capitalize()
    return "{}_{}".format(first, last)
def add_user_permission(doctype, name, user, apply=False):
    '''Add a User Permission restricting `user` to document `name` of `doctype`.

    No-op when an identical permission already exists.'''
    from frappe.core.doctype.user_permission.user_permission import get_user_permissions
    # Fix: `_` (frappe's translation helper) was used below but never imported,
    # so the not-found branch raised NameError instead of the intended throw.
    from frappe import _
    if name not in get_user_permissions(user).get(doctype, []):
        if not frappe.db.exists(doctype, name):
            frappe.throw(_("{0} {1} not found").format(_(doctype), name), frappe.DoesNotExistError)
        frappe.get_doc(dict(
            doctype='User Permission',
            user=user,
            allow=doctype,
            for_value=name,
            apply_for_all_roles=apply
        )).save(ignore_permissions=True)
11,929 | 4fa1f0b53c172d48ab00d57a023b87fc34dc6ccc | def format_float(number):
return float(format(number, '.2f'))
def calc_years(seconds, orb_period = 1):
    """Convert a duration in seconds to (planet-)years.

    orb_period is the planet's orbital period in Earth years (default 1,
    i.e. Earth itself).
    """
    days_per_orbit = 365.25 * orb_period
    mins = seconds / 60
    hrs = mins / 60
    dys = hrs / 24
    return dys / days_per_orbit
class SpaceAge(object):
    """Age of someone `seconds` old, expressed in years on each planet."""

    def __init__(self, seconds):
        self.seconds = seconds

    def _years_on(self, orbital_period=1):
        # Shared conversion: seconds -> planet-years, rounded to 2 decimals
        return format_float(calc_years(self.seconds, orbital_period))

    def on_earth(self):
        return self._years_on()

    def on_mercury(self):
        return self._years_on(0.2408467)

    def on_venus(self):
        return self._years_on(0.61519726)

    def on_mars(self):
        return self._years_on(1.8808158)

    def on_jupiter(self):
        return self._years_on(11.862615)

    def on_saturn(self):
        return self._years_on(29.447498)

    def on_uranus(self):
        return self._years_on(84.016846)

    def on_neptune(self):
        return self._years_on(164.79132)
|
11,930 | 69bd5d2dcd620fe5e4ce3c7bdbfe6e532a51b79b | """
Metropolis-Hastings 采样算法解决了Metropolis要求变量分布对称性的问题
也可以将Metropolis看作是Metropolis-Hastings的特殊情况,即q_{ij} = q_{ji}
测试Metropolis-Hastings 算法对多变量分布采样
对多变量分布采样有两种方法:BlockWise和ComponentWise
BlockWise: 需要与样本属性数量相同的多变量分布,每次生成一条数据
ComponentWise:每次生成一条数据的一个属性,相较于BlockWise没有前提要求
参考:https://blog.csdn.net/google19890102/article/details/51785156
这个代码有点问题,在于计算alpha时并没有用到q分布,也就是忽略了q_{ij} 和 q_{ji}
"""
# !/usr/bin/python
# -*- coding: utf-8 -*-
import numpy as np
from matplotlib.font_manager import FontProperties
import matplotlib.pyplot as plt
import seaborn as sns
import math
sns.set(context='paper', style='ticks')
zh_font = FontProperties(fname=r'C:\Windows\Fonts\simhei.ttf')
def biv_exp(theta1, theta2):
    """Unnormalized bivariate exponential target density for the MH samplers."""
    lam1 = 0.5
    lam2 = 0.1
    lam = 0.01
    max_val = 8
    # Same exponent as the original, just named before exponentiation
    exponent = -(lam1 + lam) * theta1 - (lam2 + lam) * theta2 - lam * max_val
    return math.exp(exponent)
def block_wise_sampling():
    """Block-wise Metropolis sampling: propose both components of the 2-D
    sample at once from a symmetric uniform proposal on [0, 8]^2.

    Returns a (10000, 2) array of chain states.
    """
    T = 10000                     # chain length
    theta = np.zeros((T, 2))
    theta_min = 0
    theta_max = 8
    theta[0] = np.random.uniform(theta_min, theta_max, size=(1, 2))
    for t in range(1, T):
        # Generate all variables of one sample directly (block proposal)
        theta_hat = np.random.uniform(theta_min, theta_max, size=2)
        # Acceptance ratio; the uniform proposal is symmetric, so q terms cancel
        alpha = min(1.0, biv_exp(*theta_hat) / biv_exp(*theta[t - 1]))
        u = np.random.uniform(0, 1)
        theta[t] = np.array(theta_hat) if u <= alpha else np.array(theta[t-1])
    return theta
def component_wise_sampling():
    """Component-wise Metropolis sampling: update one coordinate at a time.

    Returns a (10000, 2) array of chain states.
    """
    T = 10000
    theta = np.zeros((T, 2))
    theta_min = 0
    theta_max = 8
    theta[0] = np.random.uniform(theta_min, theta_max, size=(1, 2))
    for t in range(1, T):
        for i in range(theta.shape[-1]):
            # Propose a value for only the current component
            theta_hat = np.random.uniform(theta_min, theta_max, size=1)
            theta_tmp = np.array(theta[t-1])
            theta_tmp[i] = theta_hat
            # When computing alpha, the numerator changes only the current
            # component; all other components keep their previous values
            alpha = min(1.0, biv_exp(*theta_tmp) / biv_exp(*theta[t-1]))
            u = np.random.uniform(0, 1)
            theta[t][i] = np.array(theta_hat) if u <= alpha else np.array(theta[t - 1][i])
    return theta
def draw(theta, method_name):
    """Plot the trace and histogram of each of the two chain components.

    Figure titles are rendered in Chinese via zh_font (trace plot /
    state-distribution histogram for components 1 and 2).
    """
    num_bins = 50
    plt.figure()
    plt.subplot(221)
    plt.plot(theta[:, 0], color='green')
    plt.title('%s采样状态转移图-1' % method_name, fontproperties=zh_font)
    plt.subplot(222)
    plt.hist(theta[:, 0], num_bins, density=1, alpha=0.5, facecolor='red')
    plt.title('%s采样状态分布直方图-1' % method_name, fontproperties=zh_font)
    plt.subplot(223)
    plt.plot(theta[:, 1], color='green')
    plt.title('%s采样状态转移图-2' % method_name, fontproperties=zh_font)
    plt.subplot(224)
    plt.hist(theta[:, 1], num_bins, density=1, alpha=0.5, facecolor='red')
    plt.title('%s采样状态分布直方图-2' % method_name, fontproperties=zh_font)
    # NOTE(review): the positional True is received as `pad`;
    # plain plt.tight_layout() is likely what was intended -- confirm
    plt.tight_layout(True)
if __name__ == '__main__':
    # Run both samplers against the same target density and visualize the chains
    theta_1 = block_wise_sampling()
    draw(theta_1, 'BlockWise')
    theta_2 = component_wise_sampling()
    draw(theta_2, 'ComponentWise')
    plt.show()
|
11,931 | 2eb8f7bf18b9b50cbd74ab542ea775a861431536 | import numpy as np
attribute_File_Path = "D:/Simulation_Raw_Data/CelebA/list_attr_celeba.txt"
image_Files_Dir = "D:/Simulation_Raw_Data/CelebA/img"
select_Attribute_List = ["Black_Hair", "Blond_Hair", "Brown_Hair", "Male", "Young", "Pale_Skin"] #If 'None', model use all attributes
image_Size = 128
initial_Filter_Count = 64 #For bottleneck
batch_Size = 16
gan_Loss_Type = 'WGAN' #'WGAN' or 'LSGAN'
learning_Rate = 0.0001
extract_Dir = 'E:/GAN_Result/StarGAN'
test_File_Name_List = [
'D:/Simulation_Raw_Data/CelebA/Img/188712.jpg',
'D:/Simulation_Raw_Data/CelebA/Img/188673.jpg',
'D:/Simulation_Raw_Data/CelebA/Img/189052.jpg',
'D:/Simulation_Raw_Data/CelebA/Img/201436.jpg',
'D:/Simulation_Raw_Data/CelebA/Img/201881.jpg',
'D:/Simulation_Raw_Data/CelebA/Img/201434.jpg',
]
test_Attribute_Pattern_List = [
np.array([1, 0, 0, 0, 0, 0]),
np.array([0, 1, 0, 0, 0, 0]),
np.array([0, 0, 1, 0, 0, 0]),
np.array([0, 0, 0, 1, 0, 0]),
np.array([0, 0, 0, 0, 1, 0]),
np.array([0, 0, 0, 0, 0, 1]),
np.array([0, 1, 0, 1, 0, 0]),
np.array([0, 1, 0, 0, 1, 0]),
np.array([0, 0, 0, 1, 1, 0]),
np.array([0, 0, 1, 1, 1, 0]),
] #Each select_Attribute_List
test_Step = 1000
checkpoint_Step = 1000 |
11,932 | d6d25d64172f133f7259f62ca1583f7a506f5a20 | from django.dispatch import receiver
from .models import User, Profile
from django.db.models.signals import (post_save, post_delete)
from.task import one_sending
from django.shortcuts import reverse
@receiver(post_save, sender=User)
def user_post_save(sender, instance, created, *args, **kwargs):
    """On first save of a User: send a verification email and create the Profile."""
    if created:
        if not instance.is_verified:
            # NOTE(review): host is hard-coded to the dev server; move to settings
            text_to_send = 'Follow this link to verify your account: http://127.0.0.1:8000%s' \
                % reverse('users-verify', kwargs={'uuid': str(instance.verification_uuid)})
            # NOTE(review): one_sending comes from .task -- called synchronously
            # here (no .delay); confirm that is intended
            one_sending(subject="Account verification",
                        text=text_to_send,
                        email=instance.email)
        Profile.objects.create(user=instance)
@receiver(post_save, sender=User)
def user_save(sender, instance, **kwargs):
    """Persist the related Profile every time the User is saved."""
    instance.profile.save()
@receiver(post_delete, sender=User)
def user_delete_signal(sender, instance, **kwargs):
    """Log User deletions (debug aid only)."""
    print('User has been deleted')
|
11,933 | 178f859a0973e968c301570e976c6b678d3b4ab4 | # -*- coding: utf-8 -*-
"""trapezoidal.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/14FqIk6y_oVN6rLuk4jRLcSCQfweSmHcX
"""
############******* Integration of a function using Trapezoidal method *********#############
#This module contains other modules such as numpy, math, matplotlib.pyplot as np,m,plt respectiely.
#The module contains two function definitions : 1. trapezoidal and 2. Test
# The arguments of the function definition trapezoidal are:
# f (The function which is to be integrated)
# a,b (Initial and final values of integration)
# h (Stepsize of integration)
# hvar (for different values of h, hvar=1)
# pltn (for plotting pltn=0)
###########**************Steps involved in the integration ************#################
# Import the module trapezoidal.py
# Define the function for integration
# Give the value of a,b,h,hvar,pltn as required
# Call the function trapezoidal(f,a,b,h,hvar,pltn)
# Call the function Test(f,a,b)
##########************ A sample example for using the module *************#################
# from trapezoidal import *
# def f(x):
# return (3*x+5)
# a = 1
# b = 10
# h = 1
# hvar = 0
# pltn = 1
# trapezoidal(f,a,b,h,hvar,pltn)
# Test(f,a,b)
##################################----------------------------###########################
import matplotlib.pyplot as plt
import math as m
import numpy as np
def trapezoidal(f,a,b,h,hvar,pltn): # Function for estimating the integral
    """Integrate f over [a, b] with the composite trapezoidal rule.

    Parameters:
        f    : integrand; must accept scalars (and numpy arrays if pltn == 1)
        a, b : integration limits
        h    : step size; N = int((b-a)/h) subintervals are used
        hvar : if 1, tabulate the result for h, h/10, ..., h/1e4
        pltn : if 1, plot f over the interior grid points
    Output is written to the file 'trapezoidal.txt'.

    Fixes vs the original: the accumulator no longer shadows the builtin
    sum(), and the h-variation table no longer reuses the loop variable `i`
    for both the outer and inner loops.
    """
    try:
        z = [0]                       # interior nodes (dummy 0 removed below)
        total = 0                     # running sum of 2*f(x_i)
        N = int((b-a)/h)              # number of subintervals
        out = open("trapezoidal.txt", "w")   # output file
        for i in range(1, N):
            x = a + h*i
            z.append(x)
            total = total + 2*f(x)
        total = total + f(a) + f(b)
        Answer = (h/2)*total
        z.remove(0)
        w = np.array(z)               # interior nodes as an array (for plotting)
        if pltn == 1:                 # plot the integrand if requested
            plt.plot(w, f(w))
            plt.xlabel('x')
            plt.ylabel('f(x)')
            plt.title('Plot of given funtion for integration')
            plt.show()
        if hvar == 1:                 # tabulate the estimate for shrinking h
            print(' '*(15-len('Stepsize'))+'Stepsize' + ' ' * (15-len('Integration')) + ' Integration',file = out)
            for k in range(5):
                total = 0
                N = int((b-a)/h)
                for i in range(1, N):
                    x = a + h*i
                    total = total + 2*f(x)
                total = total + f(a) + f(b)
                Ans = (h/2)*total
                # NOTE(review): 100*h -- presumably a percentage display; confirm
                print('{:15.5f}{:15.7f}'.format((100*h),Ans,),file = out)
                h = h/10
        else:
            print('The value of Integration is {:f}'.format(Answer),file=out)
        out.close()
    except ZeroDivisionError:         # integrand undefined at some node
        print('Zero Division Error Encountered.')
from scipy.integrate import quad # Estimating the integral by quad inbuilt function as Test
def Test(f,a,b):
    """Cross-check: write scipy.integrate.quad's (value, abserr) tuple for
    f on [a, b] into trapezoidal.txt."""
    result = quad(f, a, b)
    with open("trapezoidal.txt", "w") as out:
        print(result, file=out)
11,934 | 6d9cacfa3646743c16c5f51f1468e2f6a09e44c3 | #
# 7/2023: allowing an optional weight column in the input data file
# improving encapsulation of functions
# Help to find continuous spectrum
# March 2019 major update:
# (i) added plateau modulus G0 (also in pyReSpect-time) calculation
# (ii) following Hansen Bayesian interpretation of Tikhonov to extract p(lambda)
# (iii) simplifying lcurve (starting from high lambda to low)
# (iv) changing definition of rho2 and eta2 (no longer dividing by 1/n and 1/nl)
from common import *
# HELPER FUNCTIONS
# def InitializeH(Gexp, s, kernMat, *argv):
def InitializeH(Gexp, wexp, s, kernMat, *argv):
    """
    Function: InitializeH(input)
    Input:  Gexp    = n*1 vector [Gt],
            wexp    = n*1 weight vector,
            s       = relaxation modes,
            kernMat = matrix for faster kernel evaluation
            G0      = optional; if plateau is nonzero
    Output: H  = guessed H
            G0 = optional guess if *argv is nonempty

    Returns (H, G0) when a plateau guess is passed in *argv, else just H.
    """
    #
    # To guess spectrum, pick a negative Hgs and a large value of lambda to get a
    # solution that is most determined by the regularization
    # March 2019; a single guess is good enough now, because going from large lambda to small
    # lambda in lcurve.
    #
    # The sin() term perturbs the flat guess so the optimizer does not start
    # at a stationary point.
    H = -5.0 * np.ones(len(s)) + np.sin(np.pi * s)
    lam = 1e0
    if len(argv) > 0:
        G0 = argv[0]
        Hlam, G0 = getH(lam, Gexp, wexp, H, kernMat, G0)
        return Hlam, G0
    else:
        Hlam = getH(lam, Gexp, wexp, H, kernMat)
        return Hlam
def getAmatrix(ns):
    """Generate the symmetric matrix A = L' * L used for error analysis.

    L is the (ns-2) x ns second-difference operator (rows [1, -2, 1]);
    helper function for lcurve in error determination.
    """
    nl = ns - 2
    # Tridiagonal 1 / -2 / 1 matrix, then keep only the interior rows.
    second_diff = (np.diag(np.ones(ns - 1), 1)
                   + np.diag(np.ones(ns - 1), -1)
                   - 2.0 * np.eye(ns))
    second_diff = second_diff[1:nl + 1, :]
    return np.dot(second_diff.T, second_diff)
def getBmatrix(H, kernMat, Gexp, wexp, *argv):
    """get the Bmatrix required for error analysis; helper for lcurve()
    not explicitly accounting for G0 in Jr because otherwise I get underflow problems

    B = Jr'Jr + diag(r'Jr), built from the weighted relative residual r and
    its Jacobian Jr with respect to H.
    """
    n = kernMat.shape[0];
    ns = kernMat.shape[1];
    nl = ns - 2;
    r = np.zeros(n);         # vector of size (n);
    # furnish relevant portion of Jacobian and residual
    # Kmatrix = np.dot((1./Gexp).reshape(n,1), np.ones((1,ns)));
    # Each row of Kmatrix is the per-point weight wexp/Gexp, replicated ns times.
    Kmatrix = np.dot((wexp/Gexp).reshape(n,1), np.ones((1,ns)))
    Jr = -kernelD(H, kernMat) * Kmatrix;
    # if plateau then unfurl G0
    if len(argv) > 0:
        G0 = argv[0]
        # r = (1. - kernel_prestore(H, kernMat, G0)/Gexp)
        r = wexp * (1. - kernel_prestore(H, kernMat, G0)/Gexp)
    else:
        # r = (1. - kernel_prestore(H, kernMat)/Gexp)
        r = wexp * (1. - kernel_prestore(H, kernMat)/Gexp)
    B = np.dot(Jr.T, Jr) + np.diag(np.dot(r.T, Jr))
    return B
def lcurve(Gexp, wexp, Hgs, kernMat, par, *argv):
    """
    Function: lcurve(input)
    Input: Gexp    = n*1 vector [Gt],
           wexp    = weights associated with datapoints
           Hgs     = guessed H,
           kernMat = matrix for faster kernel evaluation
           par     = parameter dictionary
           G0      = optionally
    Output: lamC and 3 vectors of size npoints*1 contains a range of lambda, rho
            and eta. "Elbow" = lamC is estimated using a *NEW* heuristic AND by Hansen method
    March 2019: starting from large lambda to small cuts calculation time by a lot
                also gives an error estimate
    """
    if par['plateau']:
        G0 = argv[0]
    # Log-spaced lambda grid between lam_min and lam_max.
    npoints = int(par['lamDensity'] * (np.log10(par['lam_max']) - np.log10(par['lam_min'])))
    hlam = (par['lam_max']/par['lam_min'])**(1./(npoints-1.))
    lam = par['lam_min'] * hlam**np.arange(npoints)
    eta = np.zeros(npoints)
    rho = np.zeros(npoints)
    logP = np.zeros(npoints)
    H = Hgs.copy()
    n = len(Gexp)
    ns = len(H)
    nl = ns - 2
    logPmax = -np.inf					# so nothing surprises me!
    Hlambda = np.zeros((ns, npoints))
    # Error Analysis: Furnish A_matrix
    Amat = getAmatrix(len(H))
    _, LogDetN = np.linalg.slogdet(Amat)
    #
    # This is the costliest step
    #
    # Sweep lambda from large to small; H from the previous (larger) lambda
    # warm-starts the next solve.
    for i in reversed(range(len(lam))):
        lamb = lam[i]
        if par['plateau']:
            H, G0 = getH(lamb, Gexp, wexp, H, kernMat, G0)
            # rho[i] = np.linalg.norm((1. - kernel_prestore(H, kernMat, G0)/Gexp))
            rho[i] = np.linalg.norm(wexp*(1. - kernel_prestore(H, kernMat, G0)/Gexp))
            Bmat = getBmatrix(H, kernMat, Gexp, wexp, G0)
        else:
            H = getH(lamb, Gexp, wexp, H, kernMat)
            # rho[i] = np.linalg.norm((1. - kernel_prestore(H,kernMat)/Gexp))
            rho[i] = np.linalg.norm(wexp*(1. - kernel_prestore(H, kernMat)/Gexp))
            Bmat = getBmatrix(H, kernMat, Gexp, wexp)
        eta[i] = np.linalg.norm(np.diff(H, n=2))  # smoothness of the spectrum
        Hlambda[:,i] = H
        _, LogDetC = np.linalg.slogdet(lamb*Amat + Bmat)
        V = rho[i]**2 + lamb * eta[i]**2
        # this assumes a prior exp(-lam)
        logP[i] = -V + 0.5 * (LogDetN + ns*np.log(lamb) - LogDetC) - lamb
        if(logP[i] > logPmax):
            logPmax = logP[i]
        elif(logP[i] < logPmax - 18):
            # posterior has dropped far below its peak; smaller lambdas are irrelevant
            break
    # truncate all to significant lambda
    lam = lam[i:]
    logP = logP[i:]
    eta = eta[i:]
    rho = rho[i:]
    logP = logP - max(logP)
    Hlambda = Hlambda[:,i:]
    #
    # currently using both schemes to get optimal lamC
    # new lamM works better with actual experimental data
    #
    # lamC = oldLamC(par, lam, rho, eta)
    # lamM is the posterior-weighted geometric mean of lambda.
    plam = np.exp(logP); plam = plam/np.sum(plam)
    lamM = np.exp(np.sum(plam*np.log(lam)))
    #
    # Dialling in the Smoothness Factor
    #
    # SmFacLam in (-1, 1) nudges lamM toward max(lam) (smoother) or min(lam).
    if par['SmFacLam'] > 0:
        lamM = np.exp(np.log(lamM) + par['SmFacLam']*(max(np.log(lam)) - np.log(lamM)));
    elif par['SmFacLam'] < 0:
        lamM = np.exp(np.log(lamM) + par['SmFacLam']*(np.log(lamM) - min(np.log(lam))));
    #
    # printing this here for now because storing lamC for sometime only
    #
    if par['plotting']:
        plt.clf()
        # plt.axvline(x=lamC, c='k', label=r'$\lambda_c$')
        plt.axvline(x=lamM, c='gray', label=r'$\lambda_m$')
        plt.ylim(-20,1)
        plt.plot(lam, logP, 'o-')
        plt.xscale('log')
        plt.xlabel(r'$\lambda$')
        plt.ylabel(r'$\log\,p(\lambda)$')
        plt.legend(loc='upper left')
        plt.tight_layout()
        plt.savefig('output/logP.pdf')
    return lamM, lam, rho, eta, logP, Hlambda
def getH(lam, Gexp, wexp, H, kernMat, *argv):
    """Purpose: Given a lambda, this function finds the H_lambda(s) that minimizes V(lambda)

              V(lambda) := ||(Gexp - kernel(H)) * (wexp/Gexp)||^2 +  lambda * ||L H||^2

    Input  : lambda  = regularization parameter,
             Gexp    = experimental data,
             wexp    = weighting factors,
             H       = guessed H,
             kernMat = matrix for faster kernel evaluation
             G0      = optional

    Output : H_lam, [G0]
             Default uses Trust-Region Method with Jacobian supplied by jacobianLM
    """
    # send Hplus = [H, G0], on return unpack H and G0
    if len(argv) > 0:
        Hplus= np.append(H, argv[0])
        res_lsq = least_squares(residualLM, Hplus, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
        return res_lsq.x[:-1], res_lsq.x[-1]
    # send normal H, and collect optimized H back
    else:
        res_lsq = least_squares(residualLM, H, jac=jacobianLM, args=(lam, Gexp, wexp, kernMat))
        return res_lsq.x
def residualLM(H, lam, Gexp, wexp, kernMat):
    """
    %
    % HELPER FUNCTION: Gets Residuals r
     Input  : H       = guessed H,
              lambda  = regularization parameter ,
              Gexp    = experimental data,
              wexp    = weighting factors,
              kernMat = matrix for faster kernel evaluation
              G0      = plateau

     Output : a set of n+nl residuals,
              the first n correspond to the kernel
              the last  nl correspond to the smoothness criterion
    %"""
    n = kernMat.shape[0];
    ns = kernMat.shape[1];
    nl = ns - 2;
    r = np.zeros(n + nl);
    # if plateau then unfurl G0
    # When G0 is being fitted, it rides along as the last element of H.
    if len(H) > ns:
        G0 = H[-1]
        H = H[:-1]
        # r[0:n] = (1. - kernel_prestore(H, kernMat, G0)/Gexp)  # the Gt and
        r[0:n] = wexp * (1. - kernel_prestore(H, kernMat, G0)/Gexp)  # the Gt and
    else:
        # r[0:n] = (1. - kernel_prestore(H, kernMat)/Gexp)
        r[0:n] = wexp * (1. - kernel_prestore(H, kernMat)/Gexp)
    # the curvature constraint is not affected by G0
    r[n:n+nl] = np.sqrt(lam) * np.diff(H, n=2)  # second derivative
    return r
def jacobianLM(H, lam, Gexp, wexp, kernMat):
    """
    HELPER FUNCTION for optimization: Get Jacobian J

    returns a (n+nl * ns) matrix Jr; (ns + 1) if G0 is also supplied.

    Jr_(i, j) = dr_i/dH_j

    It uses kernelD, which approximates dK_i/dH_j, where K is the kernel
    """
    n = kernMat.shape[0];
    ns = kernMat.shape[1];
    nl = ns - 2;
    # L is a ns*ns tridiagonal matrix with 1 -2 and 1 on its diagonal;
    L = np.diag(np.ones(ns-1), 1) + np.diag(np.ones(ns-1),-1) + np.diag(-2. * np.ones(ns))
    L = L[1:nl+1,:]
    # Furnish the Jacobian Jr (n+ns)*ns matrix
    # Kmatrix = np.dot((1./Gexp).reshape(n,1), np.ones((1,ns)));
    Kmatrix = np.dot((wexp/Gexp).reshape(n,1), np.ones((1,ns)));
    # If G0 is fitted it is the last element of H; add one Jacobian column for it.
    if len(H) > ns:
        G0 = H[-1]
        H = H[:-1]
        Jr = np.zeros((n + nl, ns+1))
        Jr[0:n, 0:ns] = -kernelD(H, kernMat) * Kmatrix;
        # Jr[0:n, ns] = -1./Gexp							# column for dr_i/dG0
        Jr[0:n, ns] = -wexp/Gexp							# column for dr_i/dG0
        Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
        Jr[n:n+nl, ns] = np.zeros(nl)						# column for dr_i/dG0 = 0
    else:
        Jr = np.zeros((n + nl, ns))
        Jr[0:n, 0:ns] = -kernelD(H, kernMat) * Kmatrix;
        Jr[n:n+nl,0:ns] = np.sqrt(lam) * L;
    return Jr
def kernelD(H, kernMat):
    """
    Function: kernelD(input)

    outputs the (n*ns) dimensional matrix DK(H)(t). It approximates
    dK_i/dH_j = K * e(H_j).

    Input:  H       = substituted CRS,
            kernMat = matrix for faster kernel evaluation
    Output: DK      = Jacobian of H
    """
    # Broadcasting multiplies every row of kernMat elementwise by exp(H);
    # identical to tiling exp(H) into an (n, ns) matrix first.
    return kernMat * np.exp(H)
def getContSpec(par):
    """
    This is the main driver routine for computing the continuous spectrum,

    (*)   input  : "par" dictionary from "inp.dat" which specifies GexpFile (often 'Gt.dat')
    (*)   return : H and lambdaC; the latter can be used to microscpecify lambdaC as desired
                   without having to do the entire lcurve calculation again

    Side effects: writes H.dat, Gfit.dat (and Hlam.dat, logPlam.dat,
    rho-eta.dat when the l-curve is computed) plus optional PDF plots,
    all under output/.
    """
    # read input
    if par['verbose']:
        print('\n(*) Start\n(*) Loading Data File: {}...'.format(par['GexpFile']))
    # t, Gexp = GetExpData(par['GexpFile'])
    t, Gexp, wexp = GetExpData(par['GexpFile'])
    if par['verbose']:
        print('(*) Initial Set up...', end="")
    # Set up some internal variables
    n = len(t)
    ns = par['ns']    # discretization of 'tau'
    tmin = t[0];
    tmax = t[n-1];
    # determine frequency window
    # FreqEnd selects how far the relaxation-mode window extends past the
    # data window (1 = wider, 2 = same, 3 = narrower).
    if par['FreqEnd'] == 1:
        smin = np.exp(-np.pi/2) * tmin; smax = np.exp(np.pi/2) * tmax
    elif par['FreqEnd'] == 2:
        smin = tmin; smax = tmax
    elif par['FreqEnd'] == 3:
        smin = np.exp(+np.pi/2) * tmin; smax = np.exp(-np.pi/2) * tmax
    hs = (smax/smin)**(1./(ns-1))
    s = smin * hs**np.arange(ns)
    kernMat = getKernMat(s, t)
    tic = time.time()
    # get an initial guess for Hgs, G0
    if par['plateau']:
        Hgs, G0 = InitializeH(Gexp, wexp, s, kernMat, np.min(Gexp))
    else:
        Hgs = InitializeH(Gexp, wexp, s, kernMat)
    if par['verbose']:
        te = time.time() - tic
        print('\t({0:.1f} seconds)\n(*) Building the L-curve ...'.format(te), end="")
        tic = time.time()
    # Find Optimum Lambda with 'lcurve'
    # lamC == 0 means "search for lambda"; any other value is used as-is.
    if par['lamC'] == 0:
        if par['plateau']:
            lamC, lam, rho, eta, logP, Hlam = lcurve(Gexp, wexp, Hgs, kernMat, par, G0)
        else:
            lamC, lam, rho, eta, logP, Hlam = lcurve(Gexp, wexp, Hgs, kernMat, par)
    else:
        lamC = par['lamC']
    if par['verbose']:
        te = time.time() - tic
        print('({1:.1f} seconds)\n(*) Extracting CRS, ...\n\t... lamC = {0:0.3e}; '.
              format(lamC, te), end="")
        tic = time.time()
    # Get the best spectrum
    if par['plateau']:
        H, G0 = getH(lamC, Gexp, wexp, Hgs, kernMat, G0);
        print('G0 = {0:0.3e} ...'.format(G0), end="")
    else:
        H = getH(lamC, Gexp, wexp, Hgs, kernMat);
    #----------------------
    # Print some datafiles
    #----------------------
    if par['verbose']:
        te = time.time() - tic
        print('done ({0:.1f} seconds)\n(*) Writing and Printing, ...'.format(te), end="")
    # Save inferred G(t)
    if par['plateau']:
        K = kernel_prestore(H, kernMat, G0);
        np.savetxt('output/H.dat', np.c_[s, H], fmt='%e', header='G0 = {0:0.3e}'.format(G0))
    else:
        K = kernel_prestore(H, kernMat);
        np.savetxt('output/H.dat', np.c_[s, H], fmt='%e')
    np.savetxt('output/Gfit.dat', np.c_[t, K], fmt='%e')
    # print Hlam, rho-eta, and logP if lcurve has been visited
    if par['lamC'] == 0:
        if os.path.exists("output/Hlam.dat"):
            os.remove("output/Hlam.dat")
        fHlam = open('output/Hlam.dat','ab')
        for i, lamb in enumerate(lam):
            np.savetxt(fHlam, Hlam[:,i])
        fHlam.close()
        # print logP
        np.savetxt('output/logPlam.dat', np.c_[lam, logP])
        # print rho-eta
        np.savetxt('output/rho-eta.dat', np.c_[lam, rho, eta], fmt='%e')
    #------------
    # Graphing
    #------------
    if par['plotting']:
        # plot spectrum "H.pdf" with errorbars
        plt.clf()
        plt.semilogx(s,H,'o-')
        plt.xlabel(r'$s$')
        plt.ylabel(r'$H(s)$')
        # error bounds are only available if lcurve has been implemented
        if par['lamC'] == 0:
            plam = np.exp(logP); plam = plam/np.sum(plam)
            Hm = np.zeros(len(s))
            Hm2 = np.zeros(len(s))
            cnt = 0
            for i in range(len(lam)):
                #~ Hm   += plam[i]*Hlam[:,i]
                #~ Hm2  += plam[i]*Hlam[:,i]**2
                # count all spectra within a threshold
                if plam[i] > 0.1:
                    Hm += Hlam[:,i]
                    Hm2 += Hlam[:,i]**2
                    cnt += 1
            # mean and standard deviation over the high-probability spectra
            Hm = Hm/cnt
            dH = np.sqrt(Hm2/cnt - Hm**2)
            plt.semilogx(s,Hm+2.5*dH, c='gray', alpha=0.5)
            plt.semilogx(s,Hm-2.5*dH, c='gray', alpha=0.5)
        plt.tight_layout()
        plt.savefig('output/H.pdf')
        #
        # plot comparison with input spectrum
        #
        plt.clf()
        if par['plateau']:
            K = kernel_prestore(H, kernMat, G0);
        else:
            K = kernel_prestore(H, kernMat);
        plt.loglog(t, Gexp,'o',t, K, 'k-')
        plt.xlabel(r'$t$')
        plt.ylabel(r'$G(t)$')
        plt.tight_layout()
        plt.savefig('output/Gfit.pdf')
        #
        # if lam not explicitly specified then print rho-eta.pdf
        #
        # lam only exists if the l-curve search ran; NameError means lamC
        # was prespecified.
        try:
            lam
        except NameError:
            print("lamC prespecified, so not printing rho-eta.pdf/dat")
        else:
            plt.clf()
            plt.scatter(rho, eta, marker='x')
            plt.plot(rho, eta)
            rhost = np.exp(np.interp(np.log(lamC), np.log(lam), np.log(rho)))
            etast = np.exp(np.interp(np.log(lamC), np.log(lam), np.log(eta)))
            plt.plot(rhost, etast, 'o', color='k')
            plt.xscale('log')
            plt.yscale('log')
            #~ print(rhost, etast)
            plt.xlabel(r'$\rho$')
            plt.ylabel(r'$\eta$')
            plt.tight_layout()
            plt.savefig('output/rho-eta.pdf')
    if par['verbose']:
        print('done\n(*) End\n')
    return H, lamC
def guiFurnishGlobals(par):
    """Furnish Globals to accelerate interactive plot in jupyter notebooks.

    Re-reads the experimental data and the previously-saved rho-eta.dat,
    silences verbose/plotting flags, and returns everything an interactive
    session needs: (s, t, kernMat, Gexp, par, lam, rho, eta).
    """
    # plot settings
    from matplotlib import rcParams
    rcParams['axes.labelsize'] = 14
    rcParams['xtick.labelsize'] = 12
    rcParams['ytick.labelsize'] = 12
    rcParams['legend.fontsize'] = 12
    rcParams['lines.linewidth'] = 2
    # experimental data
    t, Gexp, wG = GetExpData(par['GexpFile'])
    n = len(t)
    ns = par['ns']    # discretization of 'tau'
    tmin = t[0];
    tmax = t[n-1];
    # determine frequency window (same FreqEnd convention as getContSpec)
    if par['FreqEnd'] == 1:
        smin = np.exp(-np.pi/2) * tmin; smax = np.exp(np.pi/2) * tmax
    elif par['FreqEnd'] == 2:
        smin = tmin; smax = tmax
    elif par['FreqEnd'] == 3:
        smin = np.exp(+np.pi/2) * tmin; smax = np.exp(-np.pi/2) * tmax
    hs = (smax/smin)**(1./(ns-1))
    s = smin * hs**np.arange(ns)
    kernMat = getKernMat(s,t)
    # toggle flags to prevent printing
    par['verbose'] = False
    par['plotting'] = False
    # load lamda, rho, eta
    # NOTE(review): requires that getContSpec already wrote output/rho-eta.dat.
    lam, rho, eta = np.loadtxt('output/rho-eta.dat', unpack=True)
    plt.clf()
    return s, t, kernMat, Gexp, par, lam, rho, eta
#
# Main Driver: This part is not run when contSpec.py is imported as a module
# For example as part of GUI
#
if __name__ == '__main__':
    #
    # Read input parameters from file "inp.dat" and run the full
    # continuous-spectrum extraction.
    #
    par = readInput('inp.dat')
    H, lamC = getContSpec(par)
|
11,935 | 94d5e8f87f87d68419b21cc275165a9392040518 | # -*- coding: utf-8 -*-
def partition(nums, start, end):
    """Partition nums[start:end+1] in place around pivot nums[start].

    Returns the pivot's final index, or None if the arguments are invalid.
    """
    if not nums or start < 0 or end >= len(nums) or start > end:
        return
    pivot = nums[start]
    lo, hi = start, end
    while lo < hi:
        # Shrink from the right until a value smaller than the pivot appears.
        while lo < hi and nums[hi] >= pivot:
            hi -= 1
        nums[lo] = nums[hi]
        # Shrink from the left until a value larger than the pivot appears.
        while lo < hi and nums[lo] <= pivot:
            lo += 1
        nums[hi] = nums[lo]
    nums[lo] = pivot
    return lo
def quick_sort_core(nums, start, end):
    """Recursively quicksort nums[start:end+1] in place."""
    if not nums or start > end:
        return
    pivot_idx = partition(nums, start, end)
    # Recurse only into non-empty halves on either side of the pivot.
    if pivot_idx > start:
        quick_sort_core(nums, start, pivot_idx - 1)
    if pivot_idx < end:
        quick_sort_core(nums, pivot_idx + 1, end)
def quick_sort(nums):
    """Sort nums in place with quicksort; empty input is a no-op."""
    if not nums:
        return
    quick_sort_core(nums, 0, len(nums) - 1)
if __name__ == '__main__':
    # Demo: duplicates and negative numbers exercise the partition logic.
    nums = [4, 9, 0, 0, -1, -2, 4]
    quick_sort(nums)
    print(nums)
|
11,936 | a2b8d6418eed20e484721f8b750792355d042f47 |
def test_dynamicdata(datasetdynamic):
    """Smoke test: echo the `datasetdynamic` fixture to stdout.

    Passes as long as the fixture can be resolved and printed.
    """
    print(datasetdynamic)
|
11,937 | cf6d499755888fbcebd2cc62fb9125c6759b9ba2 | #creating a functione group_by_owners
def group_by_owners(dict):
    """Invert a {filename: owner} mapping into {owner: [filenames]}.

    File order within each owner's list follows the input's iteration order.
    The result is printed before being returned.
    """
    grouped = {}
    for filename, owner in dict.items():
        grouped.setdefault(owner, []).append(filename)
    print(grouped)
    return grouped
# Demo input; renamed from `dict` so the builtin is not shadowed at module level.
files_by_owner = {'input.txt': 'randy',
                  'code.py': 'stan',
                  'output.txt': 'randy'}
group_by_owners(files_by_owner)
|
11,938 | f273e8924845ab5ac5bbecd9a48d804ed04f808a | # Generated by Django 3.1.7 on 2021-04-30 13:03
from django.db import migrations
class Migration(migrations.Migration):
    """Rename artista.self -> artista.self_artista (avoids the reserved word)."""

    dependencies = [
        ('apirest', '0013_auto_20210430_0901'),
    ]

    operations = [
        migrations.RenameField(
            model_name='artista',
            old_name='self',
            new_name='self_artista',
        ),
    ]
|
11,939 | 57a3552f8fd84485ade632114893d1124d4e1f5e | # Generated by Django 3.0.4 on 2020-05-24 09:34
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add nullable activerecord.format_study_amount text field."""

    dependencies = [
        ('activity_record', '0016_activerecord_study_amount'),
    ]

    operations = [
        migrations.AddField(
            model_name='activerecord',
            name='format_study_amount',
            field=models.CharField(max_length=20, null=True, verbose_name=''),
        ),
    ]
|
11,940 | f955640a30421ae9c9fb03cef10b8296010fecc0 | import numpy as np
import cv2
from mpl_toolkits import mplot3d
import numpy as np
import matplotlib.pyplot as plt
# Exploratory script: scans error_keep.csv for weight triples (wb, wg, wr)
# that are local minima of the recorded error, votes for them across runs,
# and shows a per-channel error heatmap with OpenCV.
first = True  # NOTE(review): appears unused below
# Offsets to the six neighbours of (wb, wg, wr) on the simplex wb+wg+wr = 10.
neighbor = [
    [1, -1, 0],
    [1, 0, -1],
    [0, 1, -1],
    [0, -1, 1],
    [-1, 0, 1],
    [-1, 1, 0]
]
with open('error_keep.csv', 'r') as f:
    f.readline()  # skip the CSV header
    for i in range(3):  # NOTE(review): presumably 3 outer settings — confirm against the CSV layout
        vote_record = {}
        for j in range(9):
            # error indexed by the weights scaled to 0..10.
            record = np.zeros((11, 11, 11))
            for k in range(66):
                g, ss, sr, wb, wg, wr, error = f.readline().strip().split(',')
                index_b = int(float(wb)*10)
                index_g = int(float(wg)*10)
                index_r = int(float(wr)*10)
                record[index_b][index_g][index_r] = error
            # Vote for every grid point whose error is <= the minimum of its neighbours.
            for wb in range(11):
                for wg in range(10-wb, -1, -1):
                    wr = 10 - wb - wg
                    min_error = -1
                    for n in neighbor:
                        n_wb = wb + n[0]
                        n_wg = wg + n[1]
                        n_wr = wr + n[2]
                        if n_wb < 0 or n_wb > 10 or n_wg < 0 or n_wg > 10 or n_wr < 0 or n_wr > 10:
                            continue
                        elif min_error < 0 or record[n_wb][n_wg][n_wr] < min_error:
                            min_error = record[n_wb][n_wg][n_wr]
                    if record[wb][wg][wr] <= min_error:
                        key = '{}_{}_{}'.format(wb, wg, wr)
                        if key in vote_record:
                            vote_record[key] += 1
                        else:
                            vote_record[key] = 1
                        print(key)
            # Visualize the error summed over the last axis, normalized to 0..255.
            record_plot = np.sum(record, axis=-1)
            record_plot -= np.min(record_plot[record_plot != 0])
            record_plot /= np.max(record_plot)
            record_plot *= 255
            record_plot[record_plot < 0] = 200  # unfilled cells end up negative after the shift
            record_plot = record_plot.astype(np.uint8)
            cv2.imshow('error', record_plot)
            cv2.waitKey(0)
            cv2.destroyAllWindows()
        # Report the winning weight triples, most-voted first.
        print(vote_record)
        keys = list(vote_record.keys())
        keys.sort(key=lambda x: vote_record[x], reverse=True)
        print(keys)
11,941 | 0c08041322a0ef1caa76064b6d457997f46181aa | from django.db import models
from MHLogin.DoctorCom.models import Click2Call_Log, MessageLog, PagerLog
from MHLogin.MHLUsers.models import MHLUser
class TwilioCallGatherTest(models.Model):
    """Result record for a Twilio call-gather test run by a user."""
    tester = models.ForeignKey(MHLUser)
    callid = models.CharField(max_length=34, blank=True, null=True)
    debug_data = models.TextField()
    success = models.CharField(max_length=100)
    timestamp = models.DateTimeField(auto_now_add=True, editable=False)
class TwilioRecordTest(models.Model):
    """Result record for a Twilio call-recording test run by a user."""
    tester = models.ForeignKey(MHLUser)
    callid = models.CharField(max_length=34, blank=True, null=True)
    recordingurl = models.TextField(blank=True, null=True)
    debug_data = models.TextField()
    timestamp = models.DateTimeField(auto_now_add=True, editable=False)
class ConvergentTest(models.Model):
    """Result record for a convergent messaging test run by a user."""
    tester = models.ForeignKey(MHLUser)
    message = models.TextField(blank=True)
    confirmations = models.CharField(max_length=250)
    success = models.IntegerField(default=0)  # 0 for fail/unknown, 1 for success.
    timestamp = models.DateTimeField(auto_now_add=True, editable=False)
class DoctorComC2CTest(models.Model):
    """Result record for a DoctorCom click-to-call test, linked to its call log."""
    tester = models.ForeignKey(MHLUser)
    call = models.ForeignKey(Click2Call_Log, null=True)
    success = models.IntegerField(default=0)  # 0 for fail/unknown, 1 for success.
    timestamp = models.DateTimeField(auto_now_add=True, editable=False)
class DoctorComPagerTest(models.Model):
    """Result record for a DoctorCom pager test, linked to its pager log."""
    tester = models.ForeignKey(MHLUser)
    page = models.ForeignKey(PagerLog, null=True)
    success = models.IntegerField(default=0)  # 0 for fail/unknown, 1 for success.
    timestamp = models.DateTimeField(auto_now_add=True, editable=False)
class DoctorComSMSTest(models.Model):
    """Result record for a DoctorCom SMS test, linked to its message log."""
    tester = models.ForeignKey(MHLUser)
    message = models.ForeignKey(MessageLog, null=True)
    success = models.IntegerField(default=0)  # 0 for fail/unknown, 1 for success.
    timestamp = models.DateTimeField(auto_now_add=True, editable=False)
|
11,942 | 58f40601442b90ffbc2ddb3bcee2133371a5abfe | import torch
import torch.nn as nn
class ConvLstm(nn.Module):
    """A single ConvLSTM cell.

    Each of the four gates is a Conv2d from 2*in_channels (the concatenated
    [x, h] input) back to in_channels, followed by its activation. Spatial
    size is preserved via same-padding with the given padding_mode.
    """

    def __init__(self, in_channels, kernel_size, padding_mode="reflect"):
        super(ConvLstm, self).__init__()
        pad = kernel_size // 2

        # All four gates share the same conv shape; only the activation differs.
        # Attribute names (convi/convf/convg/convo) are kept so state_dict
        # keys match the original layout.
        def gate(activation):
            return nn.Sequential(
                nn.Conv2d(in_channels=2 * in_channels,
                          out_channels=in_channels,
                          kernel_size=kernel_size,
                          padding=pad,
                          padding_mode=padding_mode),
                activation,
            )

        self.convi = gate(nn.Sigmoid())  # input gate
        self.convf = gate(nn.Sigmoid())  # forget gate
        self.convg = gate(nn.Tanh())     # candidate cell state
        self.convo = gate(nn.Sigmoid())  # output gate

    def forward(self, xh, c):
        """One LSTM step.

        xh: (B, 2*C, H, W) — input and hidden state concatenated on channels.
        c:  (B, C, H, W)   — previous cell state.
        Returns (h_next, c_next), each (B, C, H, W).
        """
        gate_in = self.convi(xh)
        gate_forget = self.convf(xh)
        candidate = self.convg(xh)
        gate_out = self.convo(xh)
        c_next = gate_forget * c + gate_in * candidate
        h_next = gate_out * torch.tanh(c_next)
        return h_next, c_next
if __name__ == '__main__':
    # Smoke test: one ConvLstm step on random tensors; prints output shapes.
    batch_size = 10
    channels = 32
    height = 10
    width = 10
    kernel_size = 3
    x = torch.rand(batch_size, channels, height, width)
    h = torch.rand(batch_size, channels, height, width)
    c = torch.rand(batch_size, channels, height, width)
    model = ConvLstm(in_channels=channels, kernel_size=kernel_size)
    # The cell expects [x, h] concatenated along the channel axis.
    h, c = model(torch.cat([x, h], 1), c)
    print(h.shape, c.shape)
|
11,943 | 15855f112158eac630e2f00709e4712dd224e673 | """google_Shuffle_Pattern.py
Given a hand of cards, and you need to shuffle it. Assume that you always shuffle
the cards exactly the same way.
Will the cards reach the original state if you keep shuffling it? And if so, given
the shuffle pattern, how many times it will take to shuffle it back to its original
state?
Pattern is given as a integer array A, with no duplicates.
Ai means you shuffle the card at index i to index Ai
"""
class Solution(object):
    """Count how many identical shuffles return a deck to its original order.

    The pattern is a 1-based permutation A (no duplicates) where the card at
    index i moves to index A[i]. The answer is the LCM of the lengths of the
    permutation's cycles. Works on both Python 2 and Python 3.
    """

    def __init__(self, ptn):
        """Store the pattern 0-based and prepare cycle bookkeeping."""
        # Bug fix: wrap map() in list() — on Python 3 map() returns a lazy
        # iterator, so the original indexing self._ptn[cursor] crashed.
        self._ptn = list(map(lambda x: x - 1, ptn))  # 0-based.
        self._unvisit = set(self._ptn)               # positions not yet assigned to a cycle
        self._group_size = []                        # length of each cycle found

    def solve(self):
        """Return the number of shuffles needed to restore the original order."""
        while self._unvisit:
            i = self._unvisit.pop()
            cursor = i
            size = 1
            # Walk the cycle containing i until it closes.
            while self._ptn[cursor] != i:
                cursor = self._ptn[cursor]
                self._unvisit.remove(cursor)  # mark as visited
                size += 1
            self._group_size.append(size)
        return self.lcm(*self._group_size)

    def gcd(self, a, b):
        """Greatest common divisor via Euclid's algorithm."""
        return self.gcd(b % a, a) if a != 0 else b

    def lcm(self, *args):
        """Least common multiple of the arguments.

        Bug fix: use floor division so the result stays an int on Python 3
        (/ would promote it to float); exact because gcd divides the product.
        """
        retval = 1
        for i in args:
            retval = (retval * i) // self.gcd(retval, i)
        return retval
def main():
    """Exercise Solution on the two sample patterns."""
    # Bug fix: the original used Python-2-only `print x` statements;
    # the parenthesized call form works on both Python 2 and 3.
    print(Solution([2, 1, 4, 3]).solve())
    print(Solution([5, 3, 1, 2, 4]).solve())


if __name__ == '__main__':
    main()
|
11,944 | 748d72eff78c4f50d99eb99e2f57c085b5781cde | """
tint.tracks
===========
Cell_tracks class.
"""
from __future__ import annotations
import copy
from typing import Dict, Iterator, Optional, cast
import numpy as np
import pandas as pd
from tqdm.std import tqdm
from .config import config as tint_config
from .grid_utils import extract_grid_data, get_grid_size, get_radar_info
from .helpers import Counter, GridType, Record
from .matching import get_pairs
from .objects import (
get_object_prop,
init_current_objects,
update_current_objects,
write_tracks,
)
from .phase_correlation import get_global_shift
from .types import ConfigType
class Cell_tracks:
    """
    This is the main class in the module. It allows tracks
    objects to be built using lists of data arrays.

    Attributes
    ----------
    params : dict
        Parameters for the tracking algorithm.
    field : str
        String specifying data variable to be used for tracking. Default is
        'reflectivity'.
    grid_size : array
        Array containing z, y, and x mesh size in meters respectively.
    last_grid : Grid
        Contains the most recent grid object tracked. This is used for dynamic
        updates.
    counter : Counter
        See Counter class.
    record : Record
        See Record class.
    current_objects : dict
        Contains information about objects in the current scan.
    _tracks : DataFrame
    _saved_record : Record
        Deep copy of Record at the penultimate scan in the sequence. This and
        following 2 attributes used for link-up in dynamic updates.
    _saved_counter : Counter
        Deep copy of Counter.
    _saved_objects : dict
        Deep copy of current_objects.
    """

    def __init__(self, field: str = "reflectivity"):
        self.field = field
        self.grid_size: Optional[np.ndarray] = None
        self.radar_info: Optional[dict[str, float]] = None
        self.last_grid: Optional[GridType] = None
        self.counter: Optional[Counter] = None
        self.record: Optional[Record] = None
        self.current_objects: Optional[dict[str, np.ndarray]] = None
        self._tracks = pd.DataFrame()
        self._saved_record: Optional[Record] = None
        self._saved_counter: Optional[Counter] = None
        self._saved_objects: Optional[dict[str, np.ndarray]] = None

    @property
    def params(self) -> ConfigType:
        """Get the tracking parameters."""
        return cast(ConfigType, tint_config)

    def _save(self) -> None:
        """Saves deep copies of record, counter, and current_objects."""
        self._saved_record = copy.deepcopy(self.record)
        self._saved_counter = copy.deepcopy(self.counter)
        self._saved_objects = copy.deepcopy(self.current_objects)

    def _load(self) -> None:
        """Loads saved copies of record, counter, and current_objects. If new
        tracks are appended to existing tracks via the get_tracks method, the
        most recent scan prior to the addition must be overwritten to link up
        with the new scans. Because of this, record, counter and
        current_objects must be reverted to their state in the penultimate
        iteration of the loop in get_tracks. See get_tracks for details."""
        self.record = self._saved_record
        self.counter = self._saved_counter
        self.current_objects = self._saved_objects

    @property
    def tracks(self) -> pd.DataFrame:
        """A pandas.DataFrame representation of the tracked cells."""
        return self._tracks

    def _get_tracks(
        self,
        grids: Iterator[GridType],
        pbar: tqdm,
        centre: Optional[tuple[float, float]] = None,
    ) -> int:
        """Consume grids and build self._tracks; returns the number of cells.

        On a fresh object the first grid initializes counter/record; on a
        re-entrant call the saved last grid is re-processed so new scans link
        up with the existing tracks.
        """
        raw2: Optional[np.ndarray] = None
        if self.record is None:
            # tracks object being initialized
            grid_obj2 = next(grids)
            self.grid_size = get_grid_size(grid_obj2)
            if centre is None:
                # Use the central grid point as the radar location.
                xgrid = grid_obj2.x.values
                ygrid = grid_obj2.y.values
                if len(xgrid.shape) == 2:
                    x_c = xgrid[xgrid.shape[0] // 2][xgrid.shape[1] // 2]
                else:
                    x_c = xgrid[xgrid.shape[0] // 2]
                if len(ygrid.shape) == 2:
                    y_c = ygrid[ygrid.shape[0] // 2][ygrid.shape[1] // 2]
                else:
                    y_c = ygrid[ygrid.shape[0] // 2]
                x_c = cast(float, x_c)
                y_c = cast(float, y_c)
                self.radar_info = get_radar_info((x_c, y_c))
            else:
                self.radar_info = get_radar_info(centre)
            self.counter = Counter()
            self.record = Record(grid_obj2)
        else:
            # tracks object being updated
            grid_obj2 = cast(GridType, self.last_grid)
            # Bug fix: DataFrame.drop returns a new frame (it is not
            # in-place), so the original discarded the result and the last
            # scan was never actually removed before being re-written.
            # NOTE(review): assumes scan number labels level 0 of the index
            # as laid out by write_tracks — confirm.
            self._tracks = self._tracks.drop(self.record.scan + 1)

        new_rain = bool(self.current_objects is None)
        stop_iteration = bool(grid_obj2 is None)
        raw2, frame2 = extract_grid_data(grid_obj2, self.params)

        while not stop_iteration:
            pbar.update()
            # Shift the window: the previous "next" grid becomes current.
            grid_obj1 = grid_obj2
            raw1 = raw2
            frame1 = frame2
            try:
                grid_obj2 = next(grids)
            except StopIteration:
                stop_iteration = True
            if not stop_iteration:
                self.record.update_scan_and_time(grid_obj1, grid_obj2)
                raw2, frame2 = extract_grid_data(grid_obj2, self.params)
            else:
                # setup to write final scan
                self._save()
                self.last_grid = grid_obj1
                self.record.update_scan_and_time(grid_obj1)
                raw2 = None
                frame2 = np.zeros_like(frame1)

            if np.nanmax(frame1) == 0:
                # Empty scan: reset object continuity and move on.
                new_rain = True
                self.current_objects = None
                continue
            global_shift = cast(float, get_global_shift(raw1, raw2))
            pairs = cast(
                np.ndarray,
                get_pairs(
                    frame1,
                    frame2,
                    global_shift,
                    self.current_objects,
                    self.record,
                    self.params,
                ),
            )
            if new_rain:
                # first nonempty scan after a period of empty scans
                self.current_objects, self.counter = init_current_objects(
                    frame1, frame2, pairs, cast(Counter, self.counter)
                )
                new_rain = False
            else:
                self.current_objects, self.counter = update_current_objects(
                    frame1,
                    frame2,
                    pairs,
                    cast(Dict[str, np.ndarray], self.current_objects),
                    cast(Counter, self.counter),
                )
            obj_props = get_object_prop(
                frame1, grid_obj1, self.record, self.params
            )
            self.record.add_uids(self.current_objects)
            self._tracks = write_tracks(
                self._tracks, self.record, self.current_objects, obj_props
            )
            # Free per-scan intermediates before the next iteration.
            del grid_obj1, raw1, frame1, global_shift, pairs, obj_props
            # scan loop end
        self._load()
        ncells = 0
        if len(self._tracks):
            # Level 1 of the index holds the cell uid; the count is max + 1.
            ncells = self._tracks.index.get_level_values(1).astype(int).max() + 1
        return ncells
|
11,945 | bd7af4b2bb0cadf3f34db76da05ee88ea8d21a12 | import pickle
import pandas as pd
import numpy as np
import sys
from src.Node import Node
from src.NCMTree import NCMTree
from src.NCMForest import NCMForest
import itertools
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import time
from datetime import datetime
import itertools
import multiprocessing
from multiprocessing import Pool, Lock
from sklearn.metrics import accuracy_score
from sklearn.model_selection import cross_val_score
import os
from itertools import repeat
from functools import partial
import random
from sklearn.metrics import classification_report
sys.path.append("..")
from headers.utils import load_beer_dataset
def accuracy_print(X_train, y_train, X_test, y_test, clf, bool_cross=False, print_f=False):
    """Score a fitted classifier on train and test sets.

    :param X_train: training features
    :param y_train: training labels
    :param X_test: test features
    :param y_test: test labels
    :param clf: fitted estimator with a predict() method
    :param bool_cross: when True, also run 5-fold cross-validation on the train set
    :param print_f: when True, print the scores
    :return: (score_train, score_test, cross_val, predict_time); cross_val is
             False when bool_cross is False, and predict_time is the wall-clock
             time of the training-set predict() call only
    """
    start = time.time()
    y_train_pred = clf.predict(X_train)
    end = time.time() - start  # time of the train-set predict only
    y_test_pred = clf.predict(X_test)
    score_train = accuracy_score(y_train, y_train_pred)
    score_test = accuracy_score(y_test, y_test_pred)
    if bool_cross:
        cross_val = cross_val_score(clf, X_train, y_train, cv=5, n_jobs=-1)
    else:
        cross_val = False
    if print_f:
        print("Score en train : ", round(score_train, 3))
        print("Score en test : ", round(score_test, 3))
        # Bug fix: the original printed cross_val.mean() unconditionally,
        # which raised AttributeError (False has no .mean()) whenever
        # print_f=True was combined with bool_cross=False.
        if bool_cross:
            print("Cross-val mean : ", round(cross_val.mean(), 3), " ecart-type ", round(cross_val.std(), 3))
    return score_train, score_test, cross_val, end
def grid_search_rapport_njobs(params_dict, X_train, y_train, X_test, y_test, file='rapport.csv', verbose=1,
                              save_iteration=20, n_jobs=1, n_random=None):
    """Run a (possibly random-sampled) grid search over NCMForest parameters
    in parallel and collect a per-combination report DataFrame.

    :param params_dict: dict of parameter name -> list of candidate values
    :param X_train: training features
    :param y_train: training labels
    :param X_test: test features
    :param y_test: test labels
    :param file: CSV filename suffix; output is written under results/ with a timestamp prefix
    :param verbose: verbosity level passed to the workers
    :param save_iteration: periodic-save interval used by workers
    :param n_jobs: number of worker processes; -1 uses all CPUs
    :param n_random: when set, only this many random combinations are evaluated
    :return: the report DataFrame (also written to the CSV file)
    """
    d = datetime.now()
    f = d.strftime('%Y-%m-%d-%H-%M_')  # timestamp prefix for the output file
    # Shared namespace + lock so worker processes can append to one report.
    manager = multiprocessing.Manager()
    ns = manager.Namespace()
    lock = manager.Lock()
    ns.rapport_df = pd.DataFrame(columns=['n_trees', 'method_subclasses', 'method_max_features', 'distance', 'method_split',
                                          'min_samples_leaf', 'min_samples_split', 'max_depth', 'score_train',
                                          'score_test', 'avg_depth', 'avg_size', 'fit_time', 'predict_time'])
    ns.it = 0
    ns.file_name = 'results/' + f + file
    ns.verbose = verbose
    ns.save_iteration = save_iteration
    print("START :")
    start_full = time.time()
    # Data is shared with workers through the managed namespace.
    ns.X_train = X_train
    ns.X_test = X_test
    ns.y_train = y_train
    ns.y_test = y_test
    # Cartesian product of all parameter values.
    keys = params_dict.keys()
    values = (params_dict[key] for key in keys)
    combinations = [dict(zip(keys, combination)) for combination in itertools.product(*values)]
    if n_random:
        # NOTE(review): random.choices samples WITH replacement, so the same
        # combination may be evaluated more than once — confirm this is intended.
        combinations = random.choices(combinations, k=n_random)
    ns.nb_combi = len(combinations)
    print("Number of combinations: ", str(ns.nb_combi))
    if n_jobs == -1:
        p = Pool(processes=os.cpu_count())
    else:
        p = Pool(processes=n_jobs)
    res = p.map(partial(test_combinaison, ns=ns, lock=lock), combinations)
    p.close()
    p.join()
    ns.rapport_df.to_csv(ns.file_name, sep=';')
    print("END :" + str(time.time() - start_full))
    return ns.rapport_df
def test_combinaison(comb, ns, lock):
    """Evaluate one hyper-parameter combination (worker-side of the grid search).

    Fits an NCMForest with the parameters in ``comb``, scores it, and appends
    one row to the shared report ``ns.rapport_df`` under ``lock``.  On any
    failure a placeholder row embedding (comb, exception) is appended instead.

    :param comb: dict of hyper-parameter values for this run.
    :param ns: multiprocessing.Manager Namespace holding data and the report.
    :param lock: Manager lock guarding all writes to ``ns``.
    :return: the shared report DataFrame (as seen by this worker).
    """
    try:
        ncm = NCMForest(n_trees=comb['n_trees'],
                        method_k_bis=comb['method_subclasses'],
                        method_max_features=comb['method_max_features'],
                        distance=comb['distance'],
                        method_split=comb['method_split'],
                        min_samples_leaf=comb['min_samples_leaf'],
                        min_samples_split=comb['min_samples_split'],
                        max_depth=comb['max_depth']
                        )
        start_fit = time.time()
        if ns.verbose > 1:
            print("Fitting with params: \n")
            print(
                "n_trees:{}\nmethod_kbis:{}\nmethod_max_features:{}\ndistance:{}\nmethod_split:{}\nmin_samples_leaf:{}\nmin_samples_split:{}\nmax_depth:{}".format(
                    comb['n_trees'], comb['method_subclasses'], comb['method_max_features'], comb['distance'],
                    comb['method_split'], comb['min_samples_leaf'], comb['min_samples_split'], comb['max_depth']))
        ncm.fit(ns.X_train, ns.y_train)
        end_fit = time.time() - start_fit
        score_train, score_test, _, predict_time = accuracy_print(ns.X_train, ns.y_train, ns.X_test, ns.y_test, ncm)
        if ns.verbose > 1:
            print(ncm)
            print('Fit time :', end_fit)
            print('Predict time :', predict_time)
        # Aggregate per-tree statistics, then append one result row while
        # holding the lock (ns.rapport_df is shared across workers).
        lock.acquire()
        try:
            depths = 0
            cardinalities = 0
            for tree in ncm.trees:
                depths = depths + tree.depth
                cardinalities = cardinalities + tree.cardinality
            avg_depth = depths / len(ncm.trees)
            avg_size = cardinalities / len(ncm.trees)
            if ns.verbose > 1:
                print("[ACQUIRE] : About to write in ns.rapport_df")
            ns.rapport_df = ns.rapport_df.append([{
                'n_trees': comb['n_trees'],
                'method_subclasses': comb['method_subclasses'],
                'method_max_features': comb['method_max_features'],
                'distance': comb['distance'],
                'method_split': comb['method_split'],
                'min_samples_leaf': comb['min_samples_leaf'],
                'min_samples_split': comb['min_samples_split'],
                'max_depth': comb['max_depth'],
                'score_train': round(score_train, 3),
                'score_test': round(score_test, 3),
                'avg_depth': round(avg_depth, 3),
                'avg_size': round(avg_size, 3),
                'fit_time': round(end_fit, 1),
                'predict_time': round(predict_time, 1)}])
        finally:
            lock.release()
        if ns.verbose > 1:
            print("[RELEASE] : Release rapport_df")
        # Periodic checkpoint of the report to disk.
        if ns.it % ns.save_iteration == 0:
            print("[ACQUIRE]")
            lock.acquire()
            try:
                ns.rapport_df.to_csv(ns.file_name, sep=';')
            finally:
                lock.release()
    except Exception as e:
        # Record the failing combination with zeroed metrics so the sweep
        # continues; the (comb, exception) pair lands in the n_trees column.
        lock.acquire()
        try:
            ns.rapport_df = ns.rapport_df.append([{
                'n_trees': (comb, e),
                'method_subclasses': 0,
                'method_max_features': 0,
                'distance': 0,
                'method_split': 0,
                'min_samples_leaf': 0,
                'min_samples_split': 0,
                'max_depth': 0,
                'score_train': 0,
                'score_test': 0,
                'avg_depth': 0,
                'avg_size': 0,
                'fit_time': 0,
                'predict_time': 0}])
            print('ERROR : saving file..')
            print(e)
            ns.rapport_df.to_csv(ns.file_name, sep=';')
        finally:
            lock.release()
    # NOTE(review): this increment is outside the lock — progress counting
    # can race between workers; confirm whether exact counts matter.
    ns.it += 1
    if ns.verbose > 0:
        print()
        print(
            """Progression : """ + str(round((ns.it / ns.nb_combi) * 100)) + """% (""" + str(ns.it) + """ of """ + str(
                ns.nb_combi) + """)""")
    return ns.rapport_df
def grid_search_rapport(params_dict, X_train, y_train, X_test, y_test, file='rapport.csv', verbose=1, save_iteration=20,
                        n_random=None):
    """Sequential (single-process) NCMForest grid search; see grid_search_rapport_njobs.

    Evaluates every (or ``n_random`` sampled) combination of ``params_dict``
    values, appending one row per run to a report DataFrame that is
    checkpointed to ``results/<timestamp>_<file>`` every ``save_iteration``
    combinations and once more at the end.

    :return: the report DataFrame.
    """
    d = datetime.now()
    string_date = d.strftime('%Y-%m-%d-%H-%M_')
    file_name = 'results/' + string_date + file
    print("START :")
    start_full = time.time()
    # Full cartesian product of the candidate values.
    keys = params_dict.keys()
    values = (params_dict[key] for key in keys)
    combinations = [dict(zip(keys, combination)) for combination in itertools.product(*values)]
    if n_random:
        combinations = random.choices(combinations, k=n_random)
    nb_combi = len(combinations)
    print("Number of combinations: ", str(nb_combi))
    it = 1
    rapport_df = pd.DataFrame(columns=['n_trees', 'method_subclasses', 'method_max_features', 'distance', 'method_split',
                                       'min_samples_leaf', 'min_samples_split', 'max_depth', 'score_train',
                                       'score_test', 'avg_depth', 'avg_size', 'fit_time', 'predict_time'])
    for comb in combinations:
        try:
            ncm = NCMForest(n_trees=comb['n_trees'],
                            method_k_bis=comb['method_subclasses'],
                            method_max_features=comb['method_max_features'],
                            distance=comb['distance'],
                            method_split=comb['method_split'],
                            min_samples_leaf=comb['min_samples_leaf'],
                            min_samples_split=comb['min_samples_split'],
                            max_depth=comb['max_depth']
                            )
            start_fit = time.time()
            if verbose > 1:
                print("Fitting with params: \n")
                print(
                    "n_trees:{}\nmethod_kbis:{}\nmethod_max_features:{}\ndistance:{}\nmethod_split:{}\nmin_samples_leaf:{}\nmin_samples_split:{}\nmax_depth:{}".format(
                        comb['n_trees'], comb['method_subclasses'], comb['method_max_features'], comb['distance'],
                        comb['method_split'], comb['min_samples_leaf'], comb['min_samples_split'], comb['max_depth']))
            ncm.fit(X_train, y_train)
            end_fit = time.time() - start_fit
            score_train, score_test, _, predict_time = accuracy_print(X_train, y_train, X_test, y_test, ncm)
            if verbose > 1:
                print(ncm)
                print('Fit time :', end_fit)
                print('Predict time :', predict_time)
            # Average tree depth / node count across the fitted forest.
            depths = 0
            cardinalities = 0
            for tree in ncm.trees:
                depths = depths + tree.depth
                cardinalities = cardinalities + tree.cardinality
            avg_depth = depths / len(ncm.trees)
            avg_size = cardinalities / len(ncm.trees)
            rapport_df = rapport_df.append([{
                'n_trees': comb['n_trees'],
                'method_subclasses': comb['method_subclasses'],
                'method_max_features': comb['method_max_features'],
                'distance': comb['distance'],
                'method_split': comb['method_split'],
                'min_samples_leaf': comb['min_samples_leaf'],
                'min_samples_split': comb['min_samples_split'],
                'max_depth': comb['max_depth'],
                'score_train': round(score_train, 3),
                'score_test': round(score_test, 3),
                'avg_depth': round(avg_depth, 3),
                'avg_size': round(avg_size, 3),
                'fit_time': round(end_fit, 1),
                'predict_time': round(predict_time, 1)}])
            # Periodic checkpoint so a crash does not lose the whole sweep.
            if it % save_iteration == 0:
                rapport_df.to_csv(file_name, sep=';')
            if verbose > 0:
                print(
                    """Progression : """ + str(round((it / nb_combi) * 100)) + """% (""" + str(it) + """ of """ + str(
                        nb_combi) + """)""")
        except Exception as e:
            # NOTE(review): the error row uses different column names
            # ('comb', 'depth') than the success rows — likely unintended.
            rapport_df = rapport_df.append([{
                'comb': (comb, e),
                'score_train': 0,
                'score_test': 0,
                'depth': 0,
                'fit_time': 0,
                'predict_time': 0}])
            print('ERROR : saving file..')
            rapport_df.to_csv(file_name, sep=';')
        it += 1
    rapport_df.to_csv(file_name, sep=';')
    print("END :" + str(time.time() - start_full))
    return rapport_df
def incremental_grid_search_rapport_njobs(params_dict, X_typo, y_typo, X_manu_train, y_manu_train, X_manu_test,
                                          y_manu_test, file='rapport.csv', verbose=1, n_jobs=-1, n_random=None, path=None, mode=None):
    """Parallel grid search over incremental-learning settings (IGT / RTST).

    Each combination is handled by ``test_combinaison_inc``, which reloads the
    pickled base model from ``path``, feeds it mini-batches of the manuscript
    training data, and logs per-batch classification reports for both the
    manuscript and typographic test sets.

    :param params_dict: dict of candidate values; expected keys include
        'batch_size', 'jensen_threshold', 'recreate' and 'pi'.
    :param X_typo: typographic evaluation features.
    :param y_typo: typographic evaluation labels.
    :param X_manu_train: manuscript training features (batched incrementally).
    :param y_manu_train: manuscript training labels.
    :param X_manu_test: manuscript evaluation features.
    :param y_manu_test: manuscript evaluation labels.
    :param file: base name for the two CSV reports (typo / manu).
    :param verbose: 0 = silent, 1 = progress, >1 = per-batch details.
    :param n_jobs: worker processes (-1 = one per CPU core).
    :param n_random: if set, evaluate only this many sampled combinations.
    :param path: path of the pickled base model to update incrementally.
    :param mode: "IGT" to call model.IGT per batch, anything else uses RTST.
    :return: None (results are written to the two CSV files).
    """
    d = datetime.now()
    f = d.strftime('%Y-%m-%d-%H-%M_')
    manager = multiprocessing.Manager()
    ns = manager.Namespace()
    lock = manager.Lock()
    #ns.rapport_df = pd.DataFrame(columns=['BATCH_SIZE', 'score_manu', 'score_typo', 'jensen_threshold', 'pi'])
    # Two shared reports: one per evaluation set.
    ns.score_df_manu = pd.DataFrame()
    ns.score_df_typo = pd.DataFrame()
    ns.it = 0
    ns.file_name_typo = 'results/' + f +'_typo_'+ file
    ns.file_name_manu = 'results/' + f +'_manu_'+ file
    ns.verbose = verbose
    print('START ', f)
    start_full = time.time()
    ns.X_typo = X_typo
    ns.y_typo = y_typo
    ns.X_manu_train = X_manu_train
    ns.y_manu_train = y_manu_train
    ns.X_manu_test = X_manu_test
    ns.y_manu_test = y_manu_test
    keys = params_dict.keys()
    values = (params_dict[key] for key in keys)
    combinations = [dict(zip(keys, combination)) for combination in itertools.product(*values)]
    if n_random:
        combinations = random.choices(combinations, k=n_random)
    ns.nb_combi = len(combinations)
    print("Number of combinations: ", str(ns.nb_combi))
    if n_jobs == -1:
        p = Pool(processes=os.cpu_count())
    else:
        p = Pool(processes=n_jobs)
    p.map(partial(test_combinaison_inc, ns=ns, lock=lock, path=path, mode=mode),
          combinations)
    p.close()
    p.join()
    print("END :" + str(time.time() - start_full))
def load_batch(X, y, cursor, BATCH_SIZE):
    """Return the next mini-batch of (X, y) and the advanced cursor.

    Fix: the original did not advance ``cursor`` when returning the final
    (possibly partial) batch, so callers looping over batches re-read the
    tail slice forever.  The cursor now always moves to the end of the
    returned slice.

    :param X: indexable feature container (list, ndarray, DataFrame slice...).
    :param y: indexable label container, same length as X.
    :param cursor: start index of the batch.
    :param BATCH_SIZE: maximum batch length.
    :return: (batch_X, batch_y, new_cursor).
    """
    end = min(cursor + BATCH_SIZE, len(X))
    batch_X = X[cursor:end]
    batch_y = y[cursor:end]
    return batch_X, batch_y, end
def load_model(path):
    """Deserialize and return the pickled model stored at ``path``.

    Fix: the original left the file handle open (``pickle.load(open(...))``);
    a ``with`` block now closes it deterministically.

    WARNING: pickle executes arbitrary code — only load trusted files.
    """
    with open(path, "rb") as handle:
        return pickle.load(handle)
def test_combinaison_inc(comb, ns, lock, path, mode):
    """Worker: incrementally update the base model with one parameter combination.

    Reloads the pickled model from ``path``, feeds it the manuscript training
    data in batches of ``comb['batch_size']`` (via IGT or RTST depending on
    ``mode``), and after every batch appends a classification-report row to
    the shared manu/typo score DataFrames, flushing each to its CSV file.

    :param comb: dict with 'batch_size', 'jensen_threshold', 'recreate', 'pi'.
    :param ns: Manager Namespace with datasets, reports and file names.
    :param lock: Manager lock guarding writes to ``ns``.
    :param path: path of the pickled base model.
    :param mode: "IGT" selects model.IGT; anything else selects model.RTST.
    """
    # Ceiling division: one extra batch when the data does not divide evenly.
    nb_batch = int(len(ns.X_manu_train) / comb['batch_size'])
    if len(ns.X_manu_train) % comb['batch_size'] == 0:
        nb_batch = nb_batch
    else:
        nb_batch = nb_batch + 1
    cursor = 0
    print("Nb batch:", nb_batch)
    if ns.verbose > 1:
        print("Fitting with params: \n")
        print(
            "BATCH_SIZE:{}\njensen_threshold:{}\nrecreate:{}\npi:{}".format(
                comb['batch_size'], comb['jensen_threshold'], comb['recreate'], comb['pi']))
    # Each worker starts from a fresh copy of the persisted base model.
    model = load_model(path)
    for i in range(0, nb_batch):
        try:
            print(mode, " BATCH ", i)
            batch_X, batch_y, cursor = load_batch(ns.X_manu_train, ns.y_manu_train, cursor, comb['batch_size'])
            start_fit = time.time()
            if mode == "IGT":
                print("fit IGT")
                print(batch_X.shape)
                model.IGT(batch_X, batch_y, jensen_threshold=comb['jensen_threshold'], recreate=comb['recreate'])
            else:
                print("fit RTST")
                print(batch_X.shape)
                model.RTST(batch_X, batch_y, jensen_threshold=comb['jensen_threshold'], pi=comb['pi'],
                           recreate=comb['recreate'])
            end_fit = time.time() - start_fit
            print("----------- Incremental Done -------------")
            print('----- Predict Manu-----')
            y_pred_manu = model.predict(ns.X_manu_test)
            print('----- Predict Typo-----')
            y_pred_typo = model.predict(ns.X_typo)
            # Store result
            # Keep only the per-class recall row of the report, then tag the
            # row with this batch's settings.
            df_temp_manu = pd.DataFrame(classification_report(ns.y_manu_test, y_pred_manu, output_dict=True, digits=4)).loc[["recall"]]
            df_temp_manu["batch_nb"] = i
            df_temp_manu["time"] = round(end_fit, 3)
            df_temp_manu["batch_size"] = comb['batch_size']
            df_temp_manu["jensen_threshold"] = comb['jensen_threshold']
            df_temp_manu["recreate"] = comb['recreate']
            df_temp_manu["pi"] = comb['pi']
            df_temp_manu["mode"] = mode
            lock.acquire()
            try:
                ns.score_df_manu = ns.score_df_manu.append(df_temp_manu)
                print("Write df in "+ns.file_name_manu)
                ns.score_df_manu.to_csv(ns.file_name_manu, sep=";")
            finally:
                lock.release()
            df_temp_typo = pd.DataFrame(classification_report(ns.y_typo, y_pred_typo, output_dict=True, digits=4)).loc[["recall"]]
            df_temp_typo["batch_nb"] = i
            df_temp_typo["time"] = round(end_fit, 3)
            df_temp_typo["batch_size"] = comb['batch_size']
            df_temp_typo["jensen_threshold"] = comb['jensen_threshold']
            df_temp_typo["recreate"] = comb['recreate']
            df_temp_typo["pi"] = comb['pi']
            df_temp_typo["mode"] = mode
            lock.acquire()
            try:
                ns.score_df_typo = ns.score_df_typo.append(df_temp_typo)
                print("Write df in "+ns.file_name_typo)
                ns.score_df_typo.to_csv(ns.file_name_typo, sep=";")
            finally:
                lock.release()
            if ns.verbose > 1:
                print("cursor :", cursor)
                print("Score test manu :", df_temp_manu["accuracy"].values)
                print("Score test typo :", df_temp_typo["accuracy"].values)
        except Exception as e:
            # Best-effort: log and continue with the next batch.
            print(e)
    lock.acquire()
    try:
        ns.it += 1
    finally:
        lock.release()
    if ns.verbose > 0:
        print()
        print(
            """Progression : """ + str(round((ns.it / ns.nb_combi) * 100)) + """% (""" + str(ns.it) + """ of """ + str(
                ns.nb_combi) + """)""")
|
11,946 | 5c56be35cd0a0efc0d41da2ad1701a202a0a0c77 | #ZHU Yiming; ZHENG Nianzhao; MAO Zhenyu
import asyncio
import logging
import socket
from router import Router
class Session:
    """One accepted client connection: its socket plus the peer address."""

    def __init__(self, sock, addr):
        # Keep both handles so the router can read/write and identify the peer.
        self.sock = sock
        self.addr = addr
        logging.info(f"Coming Request from {addr}")
class HttpServer:
    # Development server listening address.
    HTTP_HOST = "127.0.0.1"
    HTTP_PORT = 8080
    @classmethod
    async def serve(cls):
        """Accept TCP connections forever, handing each one to the Router.

        The listening socket is made non-blocking so ``loop.sock_accept``
        can be awaited; every accepted connection is wrapped in a Session
        and resolved in its own asyncio task.
        """
        http_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Allow quick restarts without waiting for TIME_WAIT to expire.
        http_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        http_socket.bind((cls.HTTP_HOST, cls.HTTP_PORT))
        logging.info(f"Server started on port {cls.HTTP_PORT}")
        http_socket.listen(5)
        http_socket.setblocking(False)
        loop = asyncio.get_event_loop()
        router = Router()
        while True:
            c_conn, c_addr = await loop.sock_accept(http_socket)
            session = Session(c_conn, c_addr)
            # Fire-and-forget: the router owns the session from here on.
            loop.create_task(router.resolve(session))
if __name__ == '__main__':
    # Run the accept loop until interrupted (Ctrl-C).
    asyncio.run(HttpServer.serve())
|
11,947 | 7bb985c50b3d8b22f77c118f8fd19da84cde4720 | import json
import csv
from pymongo import MongoClient
# Compose.io url
# NOTE(review): credentials are hard-coded in this connection string —
# they should be moved to an environment variable or config file.
MONGOHQ_URL = "mongodb://codeguild2:pleese-keep-secret@dharma.mongohq.com:10023/qscfadm"
def get_storage_class(format):
    """Map a storage format name to the class that handles it.

    :param format: 'json', 'csv' or 'cloud' (case-insensitive).
    :return: the matching storage class, or None for unknown formats.
    """
    format_to_class = {
        'json': TodoListJsonFileStorage,
        'csv': TodoListCSVFileStorage,
        'cloud': TodoListCloudStorage,
    }
    # dict.get replaces the original try/except KeyError -> None.
    return format_to_class.get(format.lower())
class TodoListJsonFileStorage(object):
    """Persists a todo list as '<filename>.json' using each item's serialize()."""

    def __init__(self, filename):
        self._filename = filename + '.json'

    def write(self, todolist):
        """Serialize every item and dump the resulting list to the JSON file."""
        serialized = [item.serialize() for item in todolist]
        with open(self._filename, 'w') as handle:
            json.dump(serialized, handle)

    def read(self):
        """Load and return the list of serialized items from the JSON file."""
        with open(self._filename, 'r') as handle:
            return json.load(handle)
class TodoListCloudStorage(object):
    """Persists a todo list in a MongoDB collection (Compose.io backend)."""

    def __init__(self, colName):
        # Prefix makes the collection name user-unique; the first six
        # characters of the caller-supplied name are dropped.
        self._collectionName = 'jamil_' + colName[6:]

    def _connect(self):
        """Open the MongoDB client and bind the target database."""
        self._mongo_client = MongoClient(MONGOHQ_URL)
        self._mongo_db = self._mongo_client.qscfadm

    def write(self, todolist):
        """Replace the whole collection with the serialized todo items."""
        self._connect()
        collection = self._mongo_db[self._collectionName]
        collection.remove({})
        collection.insert_many([item.serialize() for item in todolist])

    def read(self):
        """Fetch and return every document in the collection as a list."""
        self._connect()
        return list(self._mongo_db[self._collectionName].find({}))
class TodoListCSVFileStorage(object):
    """Used by TodoList; saves & loads the list to '<filename>.csv'.

    Fix: the files are now opened in text mode with ``newline=''`` — the
    Python 3 ``csv`` module raises TypeError on the binary-mode handles the
    original (Python 2 style) code used — and the handles are closed
    deterministically via ``with``.
    """

    def __init__(self, filename):
        self._filename = filename + '.csv'

    def write(self, todolist):
        """Write one CSV row per item: id, text, status, duedaysahead, creationdate."""
        with open(self._filename, 'w', newline='') as csvfile:
            cwriter = csv.writer(csvfile, quoting=csv.QUOTE_NONNUMERIC)
            for itm in todolist:
                d = itm.serialize()
                cwriter.writerow((d['id'], d['text'], d['status'], d['duedaysahead'], d['creationdate']))

    def read(self):
        """Return the stored rows as dicts (duedaysahead coerced back to int)."""
        with open(self._filename, 'r', newline='') as csvfile:
            creader = csv.reader(csvfile, quoting=csv.QUOTE_NONNUMERIC)
            return [{'id': row[0], 'text': row[1], 'status': row[2], 'duedaysahead': int(row[3]), 'creationdate': row[4]} for row in creader]
|
11,948 | 8e6b0e58c7e8af1cfd6ab9688d581d39ae759be3 | # Generated by Django 3.2.5 on 2021-07-18 01:11
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: creates the ``Contactenos`` contact-form
    # table.  nombre/apellido are required; telefono, email and comentario
    # are nullable.  Do not edit the operations by hand.
    dependencies = [
        ('laRoja', '0007_alter_noticia_imagen'),
    ]
    operations = [
        migrations.CreateModel(
            name='Contactenos',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('nombre', models.CharField(max_length=50)),
                ('apellido', models.CharField(max_length=50)),
                ('telefono', models.IntegerField(null=True)),
                ('email', models.EmailField(max_length=254, null=True)),
                ('comentario', models.TextField(null=True)),
            ],
        ),
    ]
|
11,949 | 0288d1bf712edc9eefc520aed698015c09cdb283 | game_array = [[0, 0, 0],
[0, 0, 0],
[0, 0, 0],]
def game_board(game_map, value=0, row=0, column=0):
    """Place ``value`` at (row, column) on the board and print it.

    Fix: corrected the typo in the generic error message
    ("Spmeting" -> "Something").

    :param game_map: 3x3 nested list representing the board.
    :param value: marker to place (e.g. 1 or 2).
    :param row: 0-based row index (0..2).
    :param column: 0-based column index (0..2).
    :return: the updated board, or None when an error was handled.
    """
    try:
        game_map[row][column] = value
        print(" a b c")
        for count, array in enumerate(game_map):
            print(count, array)
        return game_map
    except IndexError as e:
        # Out-of-range row/column: report and fall through (returns None).
        print("Error: Make sure you give row/column as 0, 1 and 2", e)
    except Exception as e:
        print("Something went terribly wrong!", e)
# Demo calls: the first uses an out-of-range row (3) to exercise the
# IndexError handler; the second passes the *function* instead of the board,
# exercising the generic handler.  NOTE(review): both look like deliberate
# error demos, but each leaves game_array set to None — confirm before reuse.
game_array=game_board(game_array,value=1,row=3,column=1)
game_array=game_board(game_board,value=1,row=3,column=1)
|
11,950 | ad61552496e9278e43b791adcfcb3fadcf656d46 | from KnowledgeAcquirer import KnowledgeAcquirer
import json
class KnowledgeBase:
    """Answers fact queries, falling back to a KnowledgeAcquirer.

    ``getFacts`` first consults the local store (not implemented yet) and
    only queries the acquirer when nothing is known locally.
    """

    def __init__(self):
        self.knowledgeAcquirer = KnowledgeAcquirer()
        self.facts = []

    def getFacts(self, question):
        """Return facts for ``question``, caching them on ``self.facts``."""
        self.facts = self.getKnownFacts(question)
        if self.facts == []:
            # Nothing known locally — ask the acquirer.
            self.facts = self.knowledgeAcquirer.getFacts(question)
        return self.facts

    def getKnownFacts(self, question):
        """Local knowledge lookup; stubbed to "nothing known" for now."""
        return []
|
11,951 | c3be103b8c5dbd741575df91ba7b9b01cfb6b7a0 | import cv2
import easygui
import os
def set_cam():
    """Return the OpenCV camera index to use, caching the choice in setting.txt.

    If ``setting.txt`` exists, the cached index is read from it.  Otherwise
    camera indices 0..9 are probed, one frame of each is shown, and the user
    confirms via an easygui yes/no box; the confirmed index is appended to
    ``setting.txt`` and returned.  Returns None if no camera is confirmed.
    """
    filepath = "setting.txt"
    if os.path.isfile(filepath):
        # Message: "setting.txt exists."
        print("setting.txt存在。")
        # NOTE(review): the handle is never closed and the whole file is
        # parsed as one int — breaks if more than one index was appended.
        f = open('setting.txt','r')
        #print(type(f.read()))
        return int(f.read())
    else:
        # Message: "setting.txt does not exist."
        print("setting.txt不存在。")
        for i in range(10):
            camera_check= cv2.VideoCapture(i).isOpened()#Returns true if video capturing has been initialized already.
            next_camera_check= cv2.VideoCapture(i+1).isOpened()
            print('i :',i, 'check: ',camera_check)
            print('i+1 :',i+1, 'check: ',next_camera_check)
            cap = cv2.VideoCapture(i)
            #print("check:",i, camera_check)
            # Single-iteration loop: grab one frame, show it, ask the user.
            while(True):
                ret, frame = cap.read()
                if camera_check == False:
                    print("error")
                cv2.imshow('frame', frame)
                value = easygui.ynbox("Is this the video camera?")
                cap.release()
                cv2.destroyAllWindows()
                break
            #print("ans : i == ",i)
            if value == True:
                # Cache the confirmed index ("a" appends — see note above).
                fp = open("setting.txt", "a")
                fp.write(str(i))
                fp.close()
                #print("insert")
                return i
            else:
                continue
            #if cv2.waitKey(1) & 0xFF == ord('q'):
            #    break
    #print("camera",camera_check)
if __name__ == "__main__":
    # Interactive camera selection when run as a script.
    set_cam()
11,952 | 22a5b8e75044574bed0dd4ee4d4b2efb842fabb2 | import attr
import re
import requests
from base import config
from base.constants import TelegramConstants
from logic.bigram_model import BigramModel
@attr.s
class Message(object):
    # One chat line: who sent it (speaker) and what they wrote (text).
    speaker = attr.ib()
    text = attr.ib()
class ModelCreatorLogic(object):
    """Builds a bigram language model from an exported WhatsApp chat."""
    # Matches the leading timestamp of a WhatsApp export line, in both the
    # bracketed and the "date, time -" variants, 12h or 24h clocks.
    DATETIME = re.compile(r'\[?\d{1,2}[/.]\d{1,2}[/.]\d{2,4}, \d{1,2}:\d{2}(?::\d{1,2})?(?: (?:AM|PM))?\]?(?: -)? ')
    # Captures "sender: text"; re.S lets the text span multiple lines.
    SENDER_TEXT = re.compile(r'([^:]*): (.*)', flags=re.S)
    def __init__(self, model_uid):
        # Identifier under which the trained model is saved.
        self.model_uid = model_uid
        self.messages = []
        self.speakers = set()
    @staticmethod
    def datetime_split(chat):
        """Split the raw chat text on message timestamps."""
        return ModelCreatorLogic.DATETIME.split(chat)
    def split_chat(self, chat):
        """Splits WhatsApp chat into speakers and Messages.
        Args:
            chat (str): Text of exported WhatsApp chat.
        Returns:
            None. Populates `self.speakers` and `self.messages` with
            `chat`'s data.
        """
        senders_messages = self.datetime_split(chat)
        for sender_message in senders_messages:
            message = self._get_message(sender_message)
            if message:
                self.messages.append(message)
                self.speakers.add(message.speaker)
    def _get_message(self, sender_message):
        # type: (str) -> Message or None
        """Creates a Message out of WhatsApp chat line.
        Args:
            sender_message (str): WhatsApp chat line.
        Returns:
            Message where `speaker` is `sender_message`'s speaker and `text` is
            `sender_message`'s text. Returns None if format is wrong.
        Examples:
            >>> self._get_message('10/19/13, 20:09 - itamar: Hi')
            Message(speaker='itamar', text='Hi')
            >>> self._get_message('WRONG FORMAT')
            <None>
        """
        st_re = self.SENDER_TEXT.search(sender_message)
        if st_re is None:
            return None
        else:
            return Message(speaker=st_re.group(1), text=st_re.group(2).strip())
    def get_user_messages(self, speaker):
        # type: (str) -> [Message]
        """Returns all messages of `speaker`.
        Args:
            speaker (str): Speaker whose messages we want.
        Returns:
            List of Messages whose `.speaker` is `speaker`.
        """
        return list(filter(lambda m: m.speaker == speaker, self.messages))
    def build_and_save_model(self, messages):
        """Train a BigramModel on ``messages`` and persist it under model_uid."""
        bigram_model = BigramModel()
        bigram_model.train_string(self._clean_text_from_messages(messages))
        bigram_model.set_model_uid(self.model_uid)
        bigram_model.save_model()
    def _clean_text_from_messages(self, msgs):
        """Join message texts, stripping WhatsApp's media placeholders."""
        return '\n'.join(m.text.replace('<Media omitted>', '') for m in msgs)
    def set_webhook(self):
        """Register the Telegram webhook for this model; returns the HTTP status."""
        response = requests.post(
            TelegramConstants.TELEGRAM_WEBHOOK.format(self.model_uid),
            json={'url': config.app_url.format(self.model_uid)}
        )
        return response.status_code
|
11,953 | 32742376dd67ceac7d6e5fa3788dd3c42b319a3e | # Google Code Jam 2012
# Pawel Przytula
# p.przytula@students.mimuw.edu.pl
# Input reading helpers
import sys
def readline():
    """Read one line from stdin, stripped of surrounding whitespace."""
    return sys.stdin.readline().strip()
def readints():
    """Read one stdin line as a list of whitespace-separated integers."""
    return list(map(int, readline().split()))
# END helpers ---------
if __name__ == "__main__":
    # NOTE: Python 2 code (print statement, xrange, tuple-parameter
    # unpacking, builtin reduce) — Google Code Jam 2012 "Dancing With the
    # Googlers" style solution.
    t = readints()[0]
    for i in xrange(t):
        test = readints()
        # s = remaining "surprising" triplets, p = score threshold.
        s = test[1]
        p = test[2]
        scores = test[3:]
        def score_test(fun, count_surprise):
            global s
            # Returns a reducer that counts scores passing ``fun``; when
            # count_surprise is set, each hit consumes one surprise budget.
            def test(acc, (x, m)):
                global s
                if count_surprise:
                    if s > 0:
                        r = int(fun(x, m))
                        s -= r
                        acc += r
                else:
                    acc += int(fun(x, m))
                return acc
            return test
        # Split each total into (base, remainder mod 3).
        pscores = [(x / 3, x % 3) for x in scores]
        is_score_gte_p = lambda x, m: x + int(m >= 1) >= p
        result = reduce(score_test(is_score_gte_p, False), pscores, 0)
        pscores = filter(lambda (x, m): not is_score_gte_p(x, m), pscores)
        # surprising
        is_score_gte_p = lambda x, m: (m == 2 and x + 2 >= p) or (x + 1 >= p and x > 0)
        result = reduce(score_test(is_score_gte_p, True), pscores, result)
        print "Case #%s: %s" % (i + 1, result)
|
11,954 | 9ac9e09df72620cb9d948877c0e53aaedc1d0fd2 | # -*- coding: utf-8 -*-
"""
URL: http://adventofcode.com/2017/day/1
Part 1:
The captcha requires you to review a sequence of digits (your puzzle input)
and find the sum of all digits that match the next digit in the list.
The list is circular, so the digit after the last digit is the first digit
in the list.
Part 2:
Now, instead of considering the next digit, it wants you to consider the
digit halfway around the circular list. That is, if your list contains
10 items, only include a digit in your sum if the digit 10/2 = 5 steps
forward matches it. Fortunately, your list has an even number of elements.
Examples:
---------
>>> get_answer(1122, part=1)
3
>>> get_answer(1234, part=1)
0
>>> get_answer(91212129, part=1)
9
>>> get_answer(1212, part=2)
6
>>> get_answer(1221, part=2)
0
>>> get_answer(123425, part=2)
4
>>> get_answer(123123, part=2)
12
>>> get_answer(12131415, part=2)
4
"""
from itertools import cycle
def get_answer(puzzle_input, part=1):
    """Sum the digits that match the digit `step` positions ahead, circularly.

    Part 1 compares each digit with its immediate successor; part 2 with the
    digit halfway around the circular list.
    """
    digits = str(puzzle_input)
    size = len(digits)
    step = 1 if part == 1 else size // 2
    # Modular indexing replaces the original itertools.cycle buffer.
    return sum(
        int(digit)
        for position, digit in enumerate(digits)
        if digit == digits[(position + step) % size]
    )
if __name__ == "__main__":
    # Run the module docstring's examples as doctests.
    import doctest
    doctest.testmod()
|
11,955 | fd82eec3d1a2abb2d2f4bae48c6a8187fddb47a9 | import os
class Export:
def __init__(self, filename, data):
self.__filename = filename
self.__data = data
def get_filename(self):
return self.__filename
def get_data(self):
return self.__data
def import_data(self):
f = open(os.path.abspath('export') + "/" + self.__filename, "a")
for k, v in self.__data.items():
f.write(str(k) + ": " + str(v) + "\n")
return 'ok'
|
11,956 | 58bd9e4c1590ee9698fa060477883680f830b318 | import sys
from userfile import initial_position, primitive, gen_moves, do_moves
def solve(parents):
    """Breadth-first solve: print "win"/"tie"/"loss" for the start position.

    NOTE: Python 2 code (print statement).  ``ties`` is a module-level list
    accumulated across the recursive calls; a "win" exits the process
    immediately via sys.exit().
    """
    children = []
    # generate all children of parents;
    # end if one is a win, remember ties,
    # exclude primitives from being parents next
    for p in parents:
        moves = gen_moves(p)
        for m in moves:
            d = do_moves(p,m)
            if primitive(d) == "win":
                print "win"
                sys.exit()
            elif primitive(d) == "tie":
                ties.append(d)
            elif primitive(d) != "loss":
                children.append(d)
    # if children, make them parents
    # no children -> primitives reached
    if children:
        solve(children)
    elif not children:
        if ties:
            print "tie"
        else:
            print "loss"
#### ---- ####
# Kick off the solve from the user module's initial position; ``ties`` is
# shared with solve() as a module-level accumulator.
parents = [initial_position]
ties = []
solve(parents)
|
11,957 | c13e3456022e333ceaa49391e7a3f66786bded0e | # Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from tests.st.test_base import TestBase, HOST_IPV6
from tests.st.utils.docker_host import DockerHost
"""
Test the calicoctl container <CONTAINER> ip add/remove commands w/ auto-assign
Tests the use of (libcalico) pycalico.ipam.IPAMClient.auto_assign_ips
within calicoctl container ip add.
"""
class TestAutoAssignIp(TestBase):
    """System tests for calicoctl's auto-assigned container IPs.

    NOTE: Python 2 code (xrange, old-style super call); requires a Docker
    host and the calico pools configured with the defaults below.
    """
    def __init__(self, *args, **kwargs):
        super(TestAutoAssignIp, self).__init__(*args, **kwargs)
        self.DEFAULT_IPV4_POOL = "192.168.0.0/16"
        self.DEFAULT_IPV6_POOL = "fd80:24e2:f998:72d6::/64"
    def _setup_env(self, host, count=2, ip="ipv4", profile="TEST"):
        """Create ``count`` workloads on ``host``, add each to calico with
        auto-assign mode ``ip`` and attach them to ``profile``."""
        workloads = []
        host.calicoctl("profile add {0}".format(profile))
        for x in xrange(count):
            workloads.append(host.create_workload("workload" + str(x)))
            host.calicoctl("container add {0} {1}".format(workloads[x], ip))
            host.calicoctl("container {0} profile set {1}".format(
                workloads[x], profile))
        return workloads
    def test_add_autoassigned_ipv4(self):
        """
        Test "calicoctl container add <container> ipv4"
        """
        with DockerHost('host', dind=False) as host:
            # Test that auto-assiging IPv4 addresses gives what we expect
            workloads = self._setup_env(host, count=2, ip="ipv4")
            workloads[0].assert_can_ping("192.168.0.1", retries=3)
            workloads[1].assert_can_ping("192.168.0.0", retries=3)
            host.calicoctl("container remove {0}".format("workload0"))
            host.calicoctl("container remove {0}".format("workload1"))
            host.remove_workloads()
            # Test that recreating returns the next two IPs (IPs are not
            # reassigned automatically unless we have run out of IPs).
            workloads = self._setup_env(host, count=2, ip="ipv4")
            workloads[0].assert_can_ping("192.168.0.3", retries=3)
            workloads[1].assert_can_ping("192.168.0.2", retries=3)
    @unittest.skipUnless(HOST_IPV6, "Host does not have an IPv6 address")
    def test_add_autoassigned_ipv6(self):
        """
        Test "calicoctl container add <container> ipv6"
        """
        with DockerHost('host', dind=False) as host:
            # Test that auto-assiging IPv4 addresses gives what we expect
            workloads = self._setup_env(host, count=2, ip="ipv6")
            workloads[0].assert_can_ping("fd80:24e2:f998:72d6::1", retries=3)
            workloads[1].assert_can_ping("fd80:24e2:f998:72d6::", retries=3)
            host.calicoctl("container remove {0}".format("workload0"))
            host.calicoctl("container remove {0}".format("workload1"))
            host.remove_workloads()
            # Test that recreating returns the next two IPs (IPs are not
            # reassigned automatically unless we have run out of IPs).
            workloads = self._setup_env(host, count=2, ip="ipv6")
            workloads[0].assert_can_ping("fd80:24e2:f998:72d6::3", retries=3)
            workloads[1].assert_can_ping("fd80:24e2:f998:72d6::2", retries=3)
    def test_add_autoassigned_pool_ipv4(self):
        """
        Test "calicoctl container add <container> <IPv4 CIDR>"
        (192.168.0.0/16)
        """
        with DockerHost('host', dind=False) as host:
            # Test that auto-assiging IPv4 addresses gives what we expect
            workloads = self._setup_env(host, count=2,
                                        ip=self.DEFAULT_IPV4_POOL)
            workloads[0].assert_can_ping("192.168.0.1", retries=3)
            workloads[1].assert_can_ping("192.168.0.0", retries=3)
    @unittest.skipUnless(HOST_IPV6, "Host does not have an IPv6 address")
    def test_add_autoassigned_pool_ipv6(self):
        """
        Test "calicoctl container add <container> <IPv6 CIDR>"
        (fd80:24e2:f998:72d6::/64)
        """
        with DockerHost('host', dind=False) as host:
            # Test that auto-assiging IPv6 addresses gives what we expect
            workloads = self._setup_env(host, count=2,
                                        ip=self.DEFAULT_IPV6_POOL)
            workloads[0].assert_can_ping("fd80:24e2:f998:72d6::1", retries=3)
            workloads[1].assert_can_ping("fd80:24e2:f998:72d6::", retries=3)
|
11,958 | 80697755c72f5574dc3dffc5b617c83dfbab71d3 | from collections.abc import Iterable, Mapping, Sequence
from datetime import datetime
from typing import Any, Generic, TypeVar
from django.contrib.sites.models import Site
from django.contrib.sites.requests import RequestSite
from django.core.paginator import Paginator
from django.db.models.base import Model
from django.db.models.query import QuerySet
PING_URL: str
# Type stubs for django.contrib.sitemaps (implementation lives in Django).
class SitemapNotFound(Exception): ...
def ping_google(sitemap_url: str | None = ..., ping_url: str = ..., sitemap_uses_https: bool = ...) -> None: ...
_ItemT = TypeVar("_ItemT")
# Stub for the base Sitemap class; _ItemT is the type of the items listed.
class Sitemap(Generic[_ItemT]):
    limit: int
    protocol: str | None
    i18n: bool
    languages: Sequence[str] | None
    alternates: bool
    x_default: bool
    def items(self) -> Iterable[_ItemT]: ...
    def location(self, item: _ItemT) -> str: ...
    @property
    def paginator(self) -> Paginator: ...
    def get_languages_for_item(self, item: _ItemT) -> list[str]: ...
    def get_protocol(self, protocol: str | None = ...) -> str: ...
    def get_domain(self, site: Site | RequestSite | None = ...) -> str: ...
    def get_urls(
        self, page: int | str = ..., site: Site | RequestSite | None = ..., protocol: str | None = ...
    ) -> list[dict[str, Any]]: ...
    def get_latest_lastmod(self) -> datetime | None: ...
_ModelT = TypeVar("_ModelT", bound=Model)
# Stub for the queryset-backed sitemap convenience class.
class GenericSitemap(Sitemap[_ModelT]):
    priority: float | None
    changefreq: str | None
    queryset: QuerySet[_ModelT]
    date_field: str | None
    protocol: str | None
    def __init__(
        self,
        info_dict: Mapping[str, datetime | QuerySet[_ModelT] | str],
        priority: float | None = ...,
        changefreq: str | None = ...,
        protocol: str | None = ...,
    ) -> None: ...
    def items(self) -> QuerySet[_ModelT]: ...
    def lastmod(self, item: _ModelT) -> datetime | None: ...
    def get_latest_lastmod(self) -> datetime | None: ...
|
11,959 | 0a9ab664c67996e19c2b37cb83f4d413b1e3acbe | import numpy as np
import matlotlib.pyplot as plt
import mnist
from conv import Conv3x3
image = plt.imread('ITCrowd.png')
# NOTE(review): this looks like a Conv3x3 method pasted at module level — it
# takes ``self`` but is not inside a class, so it is never called as written.
def __init__(self, num_filters):
    self.num_filters = num_filters
    # num_filters is the filter count, e.g. 8 or 4
    self.filters = np.random.randn(num_filters, 3, 3) / 9 # num_filters random (3, 3) matrices
    # filter values start random and get updated in later training stages
def iterate_regions(self, image):
    """Yield every valid 3x3 window of ``image`` with its top-left (i, j).

    Uses 'valid' padding: windows never extend past the image border, so a
    (h, w) image yields (h - 2) * (w - 2) regions.
    - image is a 2d numpy array
    """
    height, width = image.shape
    for row in range(height - 2):
        for col in range(width - 2):
            yield image[row:row + 3, col:col + 3], row, col
def forward(self, input):
    '''
    Performs a forward pass of the conv layer using the given input.
    Returns a 3d numpy array with dimensions (h, w, num_filters).
    - input is a 2d numpy array
    '''
    h, w = input.shape
    # 'valid' convolution: output is 2 smaller in each spatial dimension.
    output = np.zeros((h - 2, w - 2, self.num_filters))
    # Key step: correlate every 3x3 region with all filters at once;
    # summing over axes (1, 2) collapses each 3x3 product to one scalar
    # per filter.
    for im_region, i, j in self.iterate_regions(input):
        output[i, j] = np.sum(im_region * self.filters, axis=(1, 2))
    return output
# The mnist package handles the MNIST dataset for us!
# Learn more at https://github.com/datapythonista/mnist
train_images = mnist.train_images()
train_labels = mnist.train_labels()
# 8 filters over a 28x28 MNIST digit -> (26, 26, 8) activation volume.
conv = Conv3x3(8)
output = conv.forward(train_images[0])
print(output.shape) # (26, 26, 8)
|
11,960 | b0bee03073e189c1957bdbd8be4a610daef57835 | class Solution:
# @param A : list of integers
# @return an integer
def maxp3(self, A):
A = sorted(A)
if A[-1] < 0:
return A[-1] * A[-2] * A[-3]
else:
third_number = A[-1]
other_two_mult = max((A[0]* A[1]), (A[-2]* A[-3]))
return third_number * other_two_mult
|
11,961 | 44389101955f2d7259b1deaf4b9f2092f589f529 | from django.http.response import HttpResponse
from django.shortcuts import render, redirect
from django.views.generic.base import View
from django.conf import settings
import itertools
import json
from datetime import datetime
import random
def read_news_from_json() -> []:
    """Load and return all news items from the configured JSON file."""
    with open(settings.NEWS_JSON_PATH) as handle:
        return json.load(handle)
def append_news_to_json(data: [{}, ]):
    """Overwrite the news JSON file with ``data``, pretty-printed."""
    with open(settings.NEWS_JSON_PATH, 'w') as handle:
        json.dump(data, handle, indent=4)
def coming_soon(request):
    """Legacy placeholder endpoint; now forwards straight to the news index."""
    return redirect('/news/')
# Module-level cache of all news items, loaded once at import time.
news_data = read_news_from_json()
class Index(View):
    """News index: optional ?q= title search, newest first, grouped by day."""

    def get(self, request):
        query = request.GET.get('q')
        if query is None:
            matched = news_data
        else:
            matched = [item for item in news_data if query in item['title']]
        # Newest first; groupby relies on this ordering to bucket by day.
        ordered = sorted(matched, key=lambda item: item['created'], reverse=True)
        grouped_news = {}
        for day, items in itertools.groupby(ordered, lambda item: item['created'][:10]):
            grouped_news[day] = list(items)
        return render(request, 'home.html', context={'grouped_news': grouped_news})
class NewsView(View):
    """Detail page for one news item, looked up by its ``link`` id."""

    def get(self, request, link, *args, **kwargs):
        match = next((item for item in news_data if item['link'] == link), None)
        if match is not None:
            return render(request, 'news.html', context=match)
        # Unknown link: fall back to the index.
        return redirect('/news/')
class CreateNewsView(View):
    """Form view that creates a new article and persists the whole list."""

    def get(self, request, *args, **kwargs):
        """Show the empty creation form."""
        return render(request, 'create.html')

    def post(self, request, *args, **kwargs):
        """Append the submitted article (with a unique random link id)."""
        created = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
        link = random.randint(1, 10000)
        # Re-draw until the id is not taken by an existing article.
        while link in [entry['link'] for entry in news_data]:
            link = random.randint(1, 10000)
        news_data.append({
            'created': created,
            'link': link,
            'title': request.POST.get('title'),
            'text': request.POST.get('text'),
        })
        append_news_to_json(news_data)
        return redirect('/news/')
|
11,962 | 570de10c5fa5aafcacd808447e7e9564b2e288cf | #!/usr/bin/python3
# coding: utf8
import os
import json
import tables
import datetime
import h5py as h5
import pandas as pd
from pickle import UnpicklingError
from tqdm import tqdm
from sxapi import LowLevelAPI, APIv2
from sxapi.low import PrivateAPIv2
from anthilldb.client import DirectDBClient
from anthilldb.settings import get_config_by_name
from tables.exceptions import HDF5ExtError
from concurrent.futures import (
ThreadPoolExecutor,
ProcessPoolExecutor,
as_completed
)
# Read API tokens for the live and staging environments from token.json,
# expected in the current working directory.
with open(os.path.abspath(os.path.join(os.getcwd(), 'token.json'))) as file:
    doc = json.load(file)
    live_token_string = doc['live']
    staging_token_string = doc['staging']

LIVECONFIG = get_config_by_name('live')
PRIVATE_STAGING_TOKEN = staging_token_string
PRIVATE_LIVE_TOKEN = live_token_string

# Endpoints: the private APIs are reached through a local tunnel/proxy.
PRIVATE_ENDPOINT = 'http://127.0.0.1:8787/internapi/v1/'
PUBLIC_ENDPOINTv2 = 'https://api.smaxtec.com/api/v2/'
PRIVATE_ENDPOINTv2 = 'http://127.0.0.1:8787/internapi/v2/'

# Credentials for the bigtable-backed DirectDBClient.
os.environ['GOOGLE_APPLICATION_CREDENTIALS'] = os.path.abspath(
    os.path.join(os.getcwd(), 'key.json'))
class DataLoader(object):
    """Downloads organisation, animal and sensor data from the smaXtec
    APIs / bigtable backend and caches everything in a local HDF5 store
    (ml_heat/__data_store__/rawdata.hdf5)."""

    def __init__(self):
        # API clients: async public v2, private v2, and the legacy private API.
        self.api = APIv2(
            api_key=PRIVATE_LIVE_TOKEN, endpoint=PUBLIC_ENDPOINTv2,
            asynchronous=True).low
        self.privateapi = PrivateAPIv2(
            api_key=PRIVATE_LIVE_TOKEN, endpoint=PRIVATE_ENDPOINTv2)
        self.oldapi = LowLevelAPI(
            api_key=PRIVATE_LIVE_TOKEN,
            private_endpoint=PRIVATE_ENDPOINT).privatelow
        self.dbclient = DirectDBClient(
            engine='bigtable',
            engine_options=LIVECONFIG.ENGINE_OPTIONS,
            table_prefix=LIVECONFIG.TABLE_PREFIX)
        self.thread_pool = ThreadPoolExecutor(30)
        self.process_pool = ProcessPoolExecutor(os.cpu_count())
        self.store_path = os.path.join(
            os.getcwd(), 'ml_heat', '__data_store__')
        if not os.path.exists(self.store_path):
            os.mkdir(self.store_path)
        self.rawdata_path = os.path.join(self.store_path, 'rawdata.hdf5')
        # Lazily populated caches (see the properties/lookup helpers below).
        self._organisation_ids = None
        self._animal_ids = None
        self._animal_orga_map = None

    def readfile(self):
        """Open the HDF5 store read-only."""
        return h5.File(self.rawdata_path, 'r')

    def writefile(self):
        """Open the HDF5 store for appending."""
        return h5.File(self.rawdata_path, 'a')

    def get_data(self, animal_id, from_dt, to_dt, metrics):
        """Schedule an async metrics fetch for one animal; returns a Future."""
        return self.thread_pool.submit(
            self.dbclient.get_metrics, animal_id, metrics, from_dt, to_dt)

    @property
    def organisation_ids(self):
        """All organisation ids present in the local store (cached)."""
        if not self._organisation_ids:
            with self.readfile() as file:
                self._organisation_ids = list(file['data'].keys())
        return self._organisation_ids

    @property
    def animal_ids(self):
        """All animal ids in the store (cached).

        NOTE(review): iterates self._organisation_ids directly, so
        `organisation_ids` must have been accessed first — otherwise
        this raises a TypeError on None. Confirm intended call order.
        """
        if not self._animal_ids:
            self._animal_ids = []
            with self.readfile() as file:
                for organisation_id in self._organisation_ids:
                    self._animal_ids += list(
                        file[f'data/{organisation_id}'].keys())
            # 'organisation' is a metadata dataset, not an animal id.
            self._animal_ids = [
                x for x in self._animal_ids if x != 'organisation']
        return self._animal_ids

    def animal_ids_for_organisations(self, organisation_ids):
        """Animal ids belonging to the given iterable of organisation ids."""
        animal_ids = []
        with self.readfile() as file:
            for organisation_id in organisation_ids:
                ids = list(file[f'data/{organisation_id}'].keys())
                filtered = [x for x in ids if x != 'organisation']
                animal_ids += filtered
        return animal_ids

    def organisation_id_for_animal_id(self, animal_id):
        """Reverse lookup via the stored animal->organisation map;
        returns None for unknown animals."""
        if self._animal_orga_map is None:
            with self.readfile() as file:
                self._animal_orga_map = json.loads(
                    file['lookup/animal_to_orga'][()])
        try:
            return self._animal_orga_map[animal_id]
        except KeyError:
            return None

    def load_organisations(self, update=False):
        """Fetch all organisations via the legacy API and cache them
        JSON-serialised under data/<orga_id>/organisation. Skipped when
        the store already has a 'data' group, unless update=True."""
        # TODO: switch to apiv2 or anthilldb once implemented
        with self.writefile() as file:
            if 'data' not in file.keys() or update:
                print('Loading organisations')
                organisations = self.oldapi.query_organisations()
                try:
                    data_group = file.create_group(name='data')
                except ValueError:
                    # group already exists
                    data_group = file['data']
                print('Storing organisations to local cache')
                for organisation in organisations:
                    # anonymization
                    organisation = self.sanitize_organisation(organisation)
                    try:
                        orga_group = data_group.create_group(
                            name=organisation['_id'])
                    except ValueError:
                        orga_group = data_group[organisation['_id']]
                    if 'organisation' in orga_group.keys():
                        del orga_group['organisation']
                    orga_group.create_dataset(
                        name='organisation',
                        data=json.dumps(organisation))
            else:
                print('Organisations found in store, skipped loading')

    def load_animals(self, organisation_ids=None, update=False):
        """Fetch animals (and their events) for the given organisations
        and cache them in the store, plus a lookup/animal_to_orga map.

        Unless update=True, organisations that already contain animal
        groups (more than just 'organisation') are skipped.
        """
        if organisation_ids is None:
            organisation_ids = self.organisation_ids
        filtered_orga_ids = None
        if not update:
            with self.readfile() as file:
                keys = file['data'].keys()
                keys = [x for x in keys if len(file[f'data/{x}'].keys()) > 1]
                filtered_orga_ids = [x for x in organisation_ids if x not in keys]
        if not filtered_orga_ids or filtered_orga_ids is None:
            if not update:
                # nothing left to load
                return
            else:
                filtered_orga_ids = organisation_ids
        # Fan the per-organisation animal queries out over the thread pool.
        futures = [self.thread_pool.submit(
            self.api.get_animals_by_organisation_id,
            organisation_id)
            for organisation_id in filtered_orga_ids]
        kwargs = {
            'total': len(futures),
            'unit': 'organisations',
            'unit_scale': True,
            'leave': True,
            'smoothing': 0.001,
            'desc': 'Loading animals'
        }
        # Drain the futures purely for progress display.
        for f in tqdm(as_completed(futures), **kwargs):
            pass
        animals = [x for future in futures for x in future.result()]
        kwargs = {
            'desc': 'Loading additional events for animals',
            'unit': 'animals',
            'smoothing': 0.001
        }
        with self.writefile() as file:
            if self._animal_orga_map is None:
                try:
                    self._animal_orga_map = json.loads(
                        file['lookup/animal_to_orga'][()])
                except Exception:
                    # lookup table not created yet
                    self._animal_orga_map = {}
            kwargs['desc'] = 'Storing animals'
            for animal in tqdm(animals, **kwargs):
                organisation_id = animal['organisation_id']
                orga_group = file[f'data/{organisation_id}']
                if animal['_id'] in orga_group.keys():
                    a_subgroup = file[
                        f'data/{organisation_id}/{animal["_id"]}']
                    if 'animal' in a_subgroup.keys():
                        if 'events' in a_subgroup.keys():
                            # animal fully cached already
                            continue
                events = self.privateapi.get_events_by_animal_id(
                    animal['_id'], True)
                self._animal_orga_map[animal['_id']] = organisation_id
                try:
                    a_subgroup = orga_group.create_group(
                        name=animal['_id'])
                except ValueError:
                    a_subgroup = orga_group[animal['_id']]
                if 'animal' in a_subgroup.keys():
                    del a_subgroup['animal']
                a_subgroup.create_dataset(
                    name='animal',
                    data=json.dumps(animal))
                if 'events' in a_subgroup.keys():
                    del a_subgroup['events']
                a_subgroup.create_dataset(
                    name='events',
                    data=json.dumps(events))
            # Persist the animal -> organisation lookup map.
            try:
                lookup_group = file.create_group('lookup')
            except ValueError:
                lookup_group = file['lookup']
            if 'animal_to_orga' in lookup_group.keys():
                del lookup_group['animal_to_orga']
            lookup_group.create_dataset(
                name='animal_to_orga',
                data=json.dumps(self._animal_orga_map))

    def load_sensordata_from_db(self,
                                organisation_ids=None,
                                update=False,
                                metrics=['act', 'temp']):
        """Download raw sensor time series (2018-04-01..2020-04-01) for
        every relevant animal into per-animal CSV chunks under a temp dir.

        NOTE(review): `metrics` is a mutable default argument; it is only
        read here, but callers should not mutate it.
        """
        print('Preparing to load sensor data...')
        from_dt = datetime.datetime(2018, 4, 1)
        to_dt = datetime.datetime(2020, 4, 1)
        self.dbclient.service_init()
        if organisation_ids is None:
            organisation_ids = self.organisation_ids
        # retrieve animal ids to load data for
        animal_ids = self.animal_ids_for_organisations(organisation_ids)
        temp_path = os.path.join(self.store_path, 'temp')
        # determine which datafiles haven't been loaded yet
        if not update:
            filtered = []
            with self.readfile() as file:
                for organisation_id in organisation_ids:
                    keys = list(file[f'data/{organisation_id}'].keys())
                    animal_ids = [key for key in keys if key != 'organisation']
                    for animal_id in animal_ids:
                        a_keys = list(
                            file[f'data/{organisation_id}/{animal_id}'].keys())
                        # fewer than 3 datasets => sensordata still missing
                        if len(a_keys) < 3:
                            filtered.append(animal_id)
            animal_ids = filtered
            # check if csv or pickle file exists already
            if os.path.exists(temp_path):
                files = os.listdir(temp_path)
                keys = list(set([string.split('|')[0] for string in files]))
                animal_ids = [
                    animal_id for animal_id in animal_ids
                    if animal_id not in keys]
        if not os.path.exists(temp_path):
            os.mkdir(temp_path)
        # print('Loading sensordata...')
        kwargs = {
            'desc': 'Loading sensordata',
            'unit': 'animals',
            'smoothing': 0.001
        }
        for _id in tqdm(animal_ids, **kwargs):
            self.download_metrics(
                self.dbclient, _id, metrics, from_dt, to_dt, temp_path)
        print('Download finished...')

    def download_metrics(
            self, db_client, key, metrics, from_dt, to_dt, output_file_path):
        """Download metrics for one animal in <=399-day chunks, writing
        each chunk to '<key>|<idx>.csv' under output_file_path.

        NOTE(review): chunk starts advance by 398 days while each chunk
        spans 399, so consecutive chunks overlap by one day — presumably
        deliberate to avoid gaps; duplicates are dropped in csv_to_hdf.
        """
        chunks = []
        while (to_dt - from_dt).total_seconds() >= 399 * 24 * 60 * 60:
            chunk = (from_dt, from_dt + datetime.timedelta(days=399))
            chunks.append(chunk)
            from_dt += datetime.timedelta(days=398)
        chunks.append((from_dt, to_dt))
        for idx, chunk in enumerate(chunks):
            all_timeseries = db_client.get_multi_metrics(
                key, metrics, chunk[0], chunk[1])
            file_name = os.path.realpath(
                os.path.join(output_file_path, f"{key}|{idx}.csv"))
            with open(file_name, "w") as fp:
                all_timeseries.to_csv(fp)
                fp.flush()
        return key

    def csv_to_hdf(self):
        """Merge the per-animal CSV chunks into the HDF5 store (one
        'sensordata' frame per animal, datetime-indexed, deduplicated),
        deleting the CSVs and the temp dir as it goes."""
        print('Writing data to hdf file...')
        temp_path = os.path.join(self.store_path, 'temp')
        files = [s for s in os.listdir(temp_path) if s.endswith('.csv')]
        filepaths = [os.path.join(temp_path, p) for p in files]
        # Filenames look like '<animal_id>|<chunk_idx>.csv'.
        animal_ids = list(set([s.split('|')[0] for s in files]))
        iterdict = {}
        for animal_id in tqdm(animal_ids, desc='Parsing filepaths'):
            iterdict[animal_id] = [
                path for path in filepaths if path.split(
                    os.sep)[-1].startswith(animal_id)]
        for key in tqdm(iterdict.keys()):
            organisation_id = self.organisation_id_for_animal_id(key)
            framelist = []
            for filepath in iterdict[key]:
                framelist.append(pd.read_csv(filepath, index_col='ts'))
            frame = pd.concat(framelist).sort_index()
            # Chunks overlap by a day; drop duplicated timestamps.
            frame = frame.loc[~frame.index.duplicated(keep='first')]
            frame.index = pd.to_datetime(
                frame.index, unit='s').rename('datetime')
            if frame.empty:
                for filepath in iterdict[key]:
                    os.remove(filepath)
                continue
            frame.to_hdf(
                self.rawdata_path,
                key=f'data/{organisation_id}/{key}/sensordata',
                complevel=9)
            for filepath in iterdict[key]:
                os.remove(filepath)
        print('Finished saving rawdata...')
        print('Cleaning up...')
        os.rmdir(temp_path)
        print('Done!')

    def animal_count_per_orga(self):
        """Return a DataFrame of animal counts per organisation.

        NOTE(review): each organisation_id (a string) is passed where
        animal_ids_for_organisations expects an iterable of ids, so it
        would be iterated character by character — verify before use.
        """
        organisation_ids = self.organisation_ids
        data = {}
        for organisation_id in organisation_ids:
            animal_ids = self.animal_ids_for_organisations(organisation_id)
            data[organisation_id] = len(animal_ids)
        return pd.DataFrame(data)

    def sanitize_organisation(self, organisation):
        """Strip identifying fields ('name', 'account') before caching."""
        organisation.pop('name', None)
        organisation.pop('account', None)
        return organisation

    def fix_dt_index(self):
        """One-off migration: rewrite any stored sensordata frame whose
        index is not yet a DatetimeIndex."""
        # collect organisations & animals
        iterdict = {}
        kwargs = {
            'desc': 'Discovering data',
            'smoothing': 0.01
        }
        with self.writefile() as file:
            organisation_ids = file['data'].keys()
            for organisation_id in tqdm(organisation_ids, **kwargs):
                iterdict[organisation_id] = []
                animal_ids = [
                    key for key in file[f'data/{organisation_id}'].keys()
                    if key != 'organisation'
                ]
                for animal_id in animal_ids:
                    if 'sensordata' in file[
                            f'data/{organisation_id}/{animal_id}'].keys():
                        iterdict[organisation_id].append(animal_id)
        kwargs['desc'] = 'Converting index datatype'
        for organisation_id, animal_ids in tqdm(iterdict.items(), **kwargs):
            kwargs['desc'] = 'Processing animals'
            kwargs['leave'] = False
            kwargs['position'] = 1
            for animal_id in tqdm(animal_ids, **kwargs):
                try:
                    frame = pd.read_hdf(
                        self.rawdata_path,
                        key=f'data/{organisation_id}/{animal_id}/sensordata'
                    )
                except HDF5ExtError as e:
                    # broken dataset: report, close handles, move on
                    print(e)
                    print(f'data read failed on animal {animal_id}, '
                          f'organisation {organisation_id}')
                    tables.file._open_files.close_all()
                    continue
                except UnpicklingError as e:
                    print(e)
                    print(f'data read failed on animal {animal_id}, '
                          f'organisation {organisation_id}')
                    tables.file._open_files.close_all()
                    continue
                if isinstance(
                        frame.index, pd.core.indexes.datetimes.DatetimeIndex):
                    continue
                frame.index = pd.to_datetime(
                    [int(dt.timestamp()) for dt in frame.index],
                    unit='s'
                )
                frame.to_hdf(
                    self.rawdata_path,
                    key=f'data/{organisation_id}/{animal_id}/sensordata',
                    complevel=9
                )

    def del_sensordata(self):
        """
        In rare cases, pandas can produce broken datasets when writing to
        hdf5, this function can be used to delete them so they can be either
        downloaded again or discarded

        USE WITH UTTERMOST CARE
        """
        # Hard-coded ids of the known-broken dataset.
        organisation_id = '5af01e0210bac288dba249ad'
        animal_id = '5b6419ff36b96c52808951b1'
        with self.writefile() as file:
            del file[f'data/{organisation_id}/{animal_id}/sensordata']

    def run(self, organisation_ids=None, update=False):
        """Full pipeline: organisations -> animals -> sensor data -> HDF5."""
        self.load_organisations(update)
        self.load_animals(organisation_ids=organisation_ids, update=update)
        self.load_sensordata_from_db(
            organisation_ids=organisation_ids, update=update)
        self.csv_to_hdf()
def main():
    """Entry point: download everything and consolidate it into HDF5."""
    loader = DataLoader()
    loader.run()


if __name__ == '__main__':
    main()
|
11,963 | 8bcf943657aa93117c951a2e31ac62e71cdb3f5f | # -*- coding: utf-8 -*-
"""
Created on Mon Feb 09 16:50:14 2015
@author: VishnuC
@email: vrajs5@gmail.com
Beating the benchmark for Microsoft Malware Classification Challenge (BIG 2015)
"""
from joblib import Parallel, delayed
import os
import gzip
from csv import writer
import six
import numpy as np
# Text read/write modes differ between Python 2 and 3 for gzip files.
read_mode, write_mode = ('r','w') if six.PY2 else ('rt','wt')
path = '/home/zak/kaggle/malware/' #Path to project
os.chdir(path)

# izip avoids materialising pair lists on Python 2; zip is already lazy on 3.
if six.PY2:
    from itertools import izip
    zp = izip
else:
    zp = zip

# Give path to gzip of asm files
paths = ['train','test']
def consolidate(path):
    ''' A consolidation of given train or test files

    Reads each ``*.bytes.gz`` dump under *path*, counts '??' markers and
    two-byte code frequencies, and writes one CSV row per file into
    ``<path>_consolidation.gz``.  Rows are flushed every 100 files to
    bound memory use.  Four-byte features are reserved (header columns
    exist) but currently disabled.
    '''
    s_path = path + '/'
    Files = os.listdir(s_path)
    byteFiles = [i for i in Files if '.bytes.gz' in i]
    consolidatedFile = path + '_consolidation.gz'
    with gzip.open(consolidatedFile, write_mode) as f:
        # Preparing header part: name, '??' count, size, code sum,
        # then one column per two-byte (and reserved four-byte) code.
        fw = writer(f)
        colnames = ['filename', 'no_que_mark']
        colnames += ['file_size', 'two_byte_sum']
        colnames += ['TB_'+hex(i)[2:] for i in range(16**2)]
        colnames += ['FB_'+hex(i)[2:] for i in range(16**4)]
        fw.writerow(colnames)
        # Creating row set
        consolidation = []
        for t, fname in enumerate(byteFiles):
            file_size = os.path.getsize(s_path+fname)
            # BUG FIX: the per-file handle was never closed; use a
            # context manager so every gzip file is released promptly.
            with gzip.open(s_path+fname, read_mode) as byte_file:
                # BUG FIX: np.int/np.float are removed in modern NumPy;
                # the builtin types are the documented replacements.
                twoByte = np.zeros(16**2, dtype=int)
                #fourByte = np.zeros(16**4, dtype=int)
                no_que_mark = 0
                for row in byte_file:
                    # Drop the leading address token and trailing newline.
                    codes = row[:-2].split()[1:]
                    # Finding number of times ?? appears
                    no_que_mark += codes.count('??')
                    # Conversion of code to two byte
                    twoByteCode = np.fromiter(
                        (int(i, 16) for i in codes if i != '??'), int)
                    # Frequency calculation of two byte codes
                    for i in twoByteCode:
                        twoByte[i] += 1
            two_byte_sum = np.sum(twoByte)
            if two_byte_sum == 0:
                # BUG FIX: was the Python-2 statement `print codes`, a
                # SyntaxError on Python 3 despite the six-based
                # compatibility setup at module level.
                print(codes)
                # Nothing to normalise; keep the raw (all-zero) counts.
                two_byte = twoByte
            else:
                two_byte = np.fromiter(
                    (float(i) / two_byte_sum for i in twoByte), float)
            # Row added
            consolidation.append(
                [fname[:fname.find('.bytes.gz')], no_que_mark] +
                [file_size] + [two_byte_sum] +
                np.ndarray.tolist(two_byte))
            # Writing rows after every 100 files processed
            if (t+1) % 100 == 0:
                print(t+1, 'files loaded for ', path)
                fw.writerows(consolidation)
                consolidation = []
        # Writing remaining files
        if len(consolidation) > 0:
            fw.writerows(consolidation)
if __name__ == '__main__':
    #for path in paths:
    #    consolidate(path)
    # Summarise the train and test sets in parallel, one worker each.
    Parallel(n_jobs=2)(delayed(consolidate)(subpath) for subpath in paths)
|
11,964 | 389d7aaaa82ffa230168cf5ce02c6db600d12cf2 | # Generated by Django 3.1.7 on 2021-05-09 11:54
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add optional district/state fields to UserDetails and give
    pinCode a '00000' default."""

    dependencies = [
        ('userpage', '0005_auto_20210508_1717'),
    ]

    operations = [
        migrations.AddField(
            model_name='userdetails',
            name='district',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AddField(
            model_name='userdetails',
            name='state',
            field=models.CharField(blank=True, max_length=30),
        ),
        migrations.AlterField(
            model_name='userdetails',
            name='pinCode',
            field=models.CharField(blank=True, default='00000', max_length=6),
        ),
    ]
|
11,965 | c4fdc6bfd5a6164ebce28aa6829b60e45865278c | import pathGenerator
import bucketVisualization
import SegmentedFourierRepresentation
import matplotlib.pyplot as plt
#orange is approximation, black is actual
read_in_trajectories = pathGenerator.read_trajectories_from_file("circling_400(20, 15, 15, 10, 10, 10, 10, 10, 0, 0).txt")
trajectory = read_in_trajectories[5] # change this number to look at different trajectories
#pathGenerator.display_trajectory(trajectory)

# Plot the visible points of the raw trajectory in black; points whose
# first coordinate is the sentinel "invisible" are skipped.
x = []
y = []
for point in trajectory:
    if point[0] != "invisible":
        x.append(point[0])
        y.append(point[1])
plt.plot(x, y, 'o', color='black')

# Fit the segmented Fourier approximation (arguments 14 and 10 —
# presumably harmonics and segment count; confirm against
# processSegmentedTrajectory), keeping only the parameters (segment[0])
# and dropping the centroid part of each segment tuple.
segmented_approximation_parameters_plus_centroid = SegmentedFourierRepresentation.processSegmentedTrajectory(trajectory, 14, 10)
segmented_approximation_parameters = []
for segment in segmented_approximation_parameters_plus_centroid:
    segmented_approximation_parameters.append(segment[0])

# Overlay the reconstructed approximation in orange.
(x, y) = bucketVisualization.generateSegmentedApproximation(segmented_approximation_parameters)
plt.plot(x, y, 'o', color='orange')
plt.show()
|
11,966 | 6f96eec87c56a56880b6c0fb394e9099380cf3cc | # -*- coding: utf-8 -*-
"""
Created on Mon Mar 4 17:02:22 2019
@author: scream
"""
import math
import pickle
# Load a recorded arkanoid game log (list of per-frame scene objects).
with open("arkanoid/log/2019-04-15_13-49-34.pickle", "rb") as f:
    data_list = pickle.load(f)

# save each information seperately
Frame=[]
Status=[]
Ballposition=[]
PlatformPosition=[]
Bricks=[]
for i in range(0,len(data_list)):
    Frame.append(data_list[i].frame)
    Status.append(data_list[i].status)
    Ballposition.append(data_list[i].ball)
    PlatformPosition.append(data_list[i].platform)
    Bricks.append(data_list[i].bricks)

#%% calculate instruction of each frame using platformposition
import numpy as np
test = np.array(PlatformPosition)[:,0]
PlatX=np.array(PlatformPosition)[:,0][:, np.newaxis]
PlatX_next=PlatX[1:,:]
# The platform moves in 5px steps, so the per-frame delta divided by 5
# yields a move command (e.g. -1/0/+1) used as the training label.
instruct=(PlatX_next-PlatX[0:len(PlatX_next),0][:,np.newaxis])/5

# select some features to make x:
# current ball position, next ball position, current platform x.
Ballarray=np.array(Ballposition[:-1])
next_Ballarray = np.array(Ballposition[1:])
x=np.hstack((Ballarray, next_Ballarray, PlatX[0:-1,0][:,np.newaxis]))
#select intructions as y
y=instruct

# split the data into train and test
from sklearn.model_selection import train_test_split
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state= 0)

#normalize (fit the scaler on the training split only)
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(x_train)
x_train_std = scaler.transform(x_train)
print(x_train_std)

#%% train your model here
from sklearn.neighbors import KNeighborsClassifier
from sklearn import svm
from sklearn.ensemble import RandomForestClassifier
clf = svm.SVC(gamma=0.001, decision_function_shape='ovo')
# clf = KNeighborsClassifier(n_neighbors=3)
# clf = RandomForestClassifier(random_state=5)
clf.fit(x_train_std,y_train)
x_test_std = scaler.transform(x_test)
pred = clf.predict(x_test_std)

# check the acc to see how well you've trained the model
from sklearn.metrics import accuracy_score
acc= accuracy_score(pred, y_test)
print(acc)

#%% save model
import pickle
filename="clf_example0401.sav"
pickle.dump(clf, open(filename, 'wb'))

# load model
# l_model=pickle.load(open(filename,'rb'))
# yp_l=l_model.predict(x_test)
# print("acc load: %f " % accuracy_score(yp_l, y_test))
|
11,967 | a45b9888e9a34ec5fa61fef27f552b43001aa28a | import csv
# Collect three (USN, name) entries interactively, keeping them in `dire`
# and appending each to direc.csv as it is entered.
dire={}
i=3
while i!=0:
    name=input('Enter Name: ')
    usn=input('Enter USN: ')
    dire[usn]=name
    with open('direc.csv', 'a', newline='') as csvfile:
        spamwriter = csv.writer(csvfile)
        spamwriter.writerow([usn] + [name.title()])
    i-=1

# Echo the full directory file back, one 'usn: name' line per row.
with open('direc.csv', newline='') as csvfile:
    spamreader = csv.reader(csvfile)
    for row in spamreader:
        print(': '.join(row))
11,968 | 6aaad8bcdb03f4a61558d1dac73d6cad7de7f5bb | # -*- coding: utf-8 -*-
from django.db import models
from django.template.defaultfilters import slugify
from django.contrib.auth.models import User
from lugar.models import *
from multiselectfield import MultiSelectField
from sorl.thumbnail import ImageField
# Create your models here.
# NOTE(review): Python-2-era Django models (__unicode__, ForeignKey
# without on_delete) — confirm the target Django version before reuse.
class Status(models.Model):
    # Legal status of an organisation.
    nombre = models.CharField(max_length=200)

    def __unicode__(self):
        return self.nombre

    class Meta:
        verbose_name = "Status Legal"
        verbose_name_plural = "Status Legal"


# Organisation kind: Canicacao member vs. supporting organisation.
TIPO_CHOICES = (
    (1,'Miembro Canicacao'),
    (2,'Organización de apoyo'),
)


class Organizacion(models.Model):
    # Master data for a member or partner organisation.
    nombre = models.CharField(max_length=200,verbose_name='Organización/Institución')
    siglas = models.CharField(max_length=200)
    gerente = models.CharField(max_length=200,verbose_name='Representante legal',null=True,blank=True)
    status = models.ForeignKey(Status,verbose_name='Status Legal',null=True,blank=True)
    fundacion = models.DateField(verbose_name='Año fundación',null=True,blank=True)
    direccion = models.CharField(max_length=300,null=True,blank=True)
    municipio = models.ForeignKey(Municipio)
    telefono = models.IntegerField(verbose_name='Número telefónico',null=True,blank=True)
    fax = models.IntegerField(verbose_name='Número fax',null=True,blank=True)
    email = models.EmailField(null=True,blank=True)
    web = models.URLField(verbose_name='Página web',null=True,blank=True)
    tipo = models.IntegerField(choices=TIPO_CHOICES,verbose_name='Tipo de Organización',null=True,blank=True)
    logo = ImageField(upload_to='logo_org/',null=True,blank=True)
    #usuario = models.ForeignKey(User)

    def __unicode__(self):
        return self.siglas

    def save(self, *args, **kwargs):
        # Derive the slug from the acronym on first save only.
        # NOTE(review): no `slug` field is declared on this model in the
        # visible code — confirm it exists (e.g. on a base class).
        if not self.id:
            self.slug = slugify(self.siglas)
        super(Organizacion, self).save(*args, **kwargs)

    class Meta:
        verbose_name = "Organización"
        verbose_name_plural = "Organizaciones"
        unique_together = ("nombre",)


# Generic yes/no answer choices reused by the survey models below.
SI_NO_CHOICES = (
    (1,'Si'),
    (2,'No'),
)
class Encuesta_Org(models.Model):
    # A survey taken for an organisation; `anno` is derived from `fecha`.
    fecha = models.DateField()
    organizacion = models.ForeignKey(Organizacion,related_name='Organizacion')
    anno = models.IntegerField()
    usuario = models.ForeignKey(User,related_name='User')

    def __unicode__(self):
        return self.organizacion.siglas

    def save(self, *args, **kwargs):
        # Keep the survey year in sync with the survey date.
        self.anno = self.fecha.year
        super(Encuesta_Org, self).save(*args, **kwargs)

    class Meta:
        verbose_name = "Encuesta"
        verbose_name_plural = "Encuestas"


class Aspectos_Juridicos(models.Model):
    # Legal-status section of the survey (yes/no questions plus counts).
    tiene_p_juridica = models.IntegerField(choices=SI_NO_CHOICES,verbose_name='Personería jurídica')
    act_p_juridica = models.IntegerField(choices=SI_NO_CHOICES,verbose_name='Actualización personería jurídica')
    solvencia_tributaria = models.IntegerField(choices=SI_NO_CHOICES,verbose_name='Cuenta con solvencia tributaria (DGI)')
    junta_directiva = models.IntegerField(choices=SI_NO_CHOICES,verbose_name='Junta Directiva certificada')
    mujeres = models.IntegerField(verbose_name='Miembros mujeres JD')
    hombres = models.IntegerField(verbose_name='Miembros hombres JD')
    lista_socios = models.IntegerField(choices=SI_NO_CHOICES,verbose_name='Lista socias/os esta actualizada y certificada')
    ruc = models.CharField(max_length=50,verbose_name='No. RUC',null=True,blank=True)
    #organizacion = models.ForeignKey(Organizacion)
    encuesta = models.ForeignKey(Encuesta_Org,null=True,blank=True)

    class Meta:
        verbose_name = "Aspectos jurídicos"
        verbose_name_plural = "Aspectos jurídicos"


# Management documents an organisation may or may not possess.
DOCUMENTOS_CHOICES = (
    (1,'Poseen estatutos'),
    (2,'Cuentan con plan estratégico'),
    (3,'Poseen libro de Actas'),
    (4,'Tiene plan de negocios'),
    (5,'Cuentan con plan de acopio'),
    (6,'Poseen plan de comercialización'),
)


class Documentacion(models.Model):
    # One row per document type: whether it exists and when it was updated.
    documentos = models.IntegerField(choices=DOCUMENTOS_CHOICES)
    si_no = models.IntegerField(choices=SI_NO_CHOICES,verbose_name='Si/No')
    fecha = models.DateField(verbose_name='Fecha de elaboración u actualización')
    #organizacion = models.ForeignKey(Organizacion)
    encuesta = models.ForeignKey(Encuesta_Org,null=True,blank=True)

    class Meta:
        verbose_name = "Inform. sobre documentación en gestión"
        verbose_name_plural = "Inform. sobre documentación en gestión"
class Datos_Productivos(models.Model):
    # Production data: membership counts, areas (Mz) and cacao volumes (QQ).
    socias = models.IntegerField()
    socios = models.IntegerField()
    pre_socias = models.IntegerField()
    pre_socios = models.IntegerField()
    area_total = models.FloatField(verbose_name='Área total establecida por sus socias/os')
    area_cert_organico = models.FloatField(verbose_name='Área con certificado orgánico')
    area_convencional = models.FloatField(verbose_name='Área convencional')
    cacao_baba = models.FloatField(verbose_name='QQ')
    area_cacao_baba =models.FloatField(verbose_name='Mz')
    cacao_seco = models.FloatField(verbose_name='QQ')
    area_cacao_seco =models.FloatField(verbose_name='Mz')
    #organizacion = models.ForeignKey(Organizacion)
    encuesta = models.ForeignKey(Encuesta_Org,null=True,blank=True)

    class Meta:
        verbose_name = "Datos productivos de la Org. y asociado"
        verbose_name_plural = "Datos productivos de la Org. y asociado"


# Kinds of infrastructure an organisation can report owning.
INFRAESTRUCTURA_CHOICES = (
    (1,'Centro de Acopio central'),
    (2,'Centro de acopio comunitarios'),
    (3,'Hornos de secado'),
    (4,'Planta de procesamiento'),
    (5,'Bodegas'),
    (6,'Cuartos fríos'),
    (7,'Oficina'),
    (8,'Medios de Transporte'),
    (9,'Área de fermentado'),
    (10,'Túneles de secado'),
)

# Condition of a piece of infrastructure.
ESTADO_CHOICES = (
    (1,'Bueno'),
    (2,'Malo'),
    (3,'Regular'),
)


class Infraestructura(models.Model):
    # One row per reported infrastructure item.
    tipo = models.IntegerField(choices=INFRAESTRUCTURA_CHOICES,verbose_name='Tipo de Infraestructura')
    cantidad = models.FloatField()
    capacidad = models.FloatField(verbose_name='Capacidad de las instalaciones (qq)')
    anno_construccion = models.DateField(verbose_name='Año de construcción')
    estado = models.IntegerField(choices=ESTADO_CHOICES,verbose_name='Estado de infraestructura')
    #organizacion = models.ForeignKey(Organizacion)
    encuesta = models.ForeignKey(Encuesta_Org,null=True,blank=True)

    class Meta:
        verbose_name = "Infraestructura y maquinaria"
        verbose_name_plural = "Infraestructura y maquinaria"


# Product, certification and destination choices used by the
# commercialisation survey section below.
TIPO_PROD_CHOICES = (
    (1,'Cacao rojo'),
    (2,'Cacao fermentado'),
    (3,'Ambos'),
)

TIPO_MERCADO_CHOICES = (
    (1,'Convencional'),
    (2,'Orgánico'),
    (3,'Comercio Justo'),
    (4,'UTZ'),
)

DESTINO_CHOICES = (
    (1,'Mercado Local'),
    (2,'Mercado Nacional'),
    (3,'Mercado Internacional'),
)
class Comercializacion_Org(models.Model):
    # Commercialisation figures for the surveyed year.
    #fecha = models.IntegerField(verbose_name='Año de recolección de información')
    cacao_baba_acopiado = models.FloatField(verbose_name='Cacao en baba acopiado (qq)')
    cacao_seco_comercializado = models.FloatField(verbose_name='Cacao seco comercializado (qq)')
    socios_cacao = models.IntegerField(verbose_name='Socios que entregaron cacao al acopio')
    productores_no_asociados = models.IntegerField(verbose_name='Productores no asociados')
    tipo_producto = models.IntegerField(choices=TIPO_PROD_CHOICES,verbose_name='Tipo de producto comercializado')
    tipo_mercado = MultiSelectField(choices=TIPO_MERCADO_CHOICES,verbose_name='Tipo de certificación')
    destino_produccion = MultiSelectField(choices=DESTINO_CHOICES)
    #organizacion = models.ForeignKey(Organizacion)
    encuesta = models.ForeignKey(Encuesta_Org,null=True,blank=True)

    class Meta:
        verbose_name = "Comercialización de la Organización"
        verbose_name_plural = "Comercialización de la Organización"


class Comercializacion_Importancia(models.Model):
    # Free-text ranking of where the organisation sells its cacao.
    orden_importancia = models.CharField(max_length=200,verbose_name='Donde comercializa su cacao (por orden de importancia)')
    #organizacion = models.ForeignKey(Organizacion)
    encuesta = models.ForeignKey(Encuesta_Org,null=True,blank=True)

    class Meta:
        verbose_name = "Comercialización Cacao"
        verbose_name_plural = "Comercialización Cacao"


# Funding sources for collection and commercialisation activities.
ACOPIO_COMERCIO_CHOICES = (
    (1,'Propio'),
    (2,'Crédito bancario'),
    (3,'Cooperación Internacional'),
    (4,'Financiamiento del comprador'),
)


class Acopio_Comercio(models.Model):
    # Multi-select of the funding sources above.
    seleccion = MultiSelectField(choices=ACOPIO_COMERCIO_CHOICES)
    #organizacion = models.ForeignKey(Organizacion)
    encuesta = models.ForeignKey(Encuesta_Org,null=True,blank=True)

    class Meta:
        verbose_name = "Financiamiento de acopio y comerc."
        verbose_name_plural = "Financiamiento de acopio y comerc."
11,969 | b34c91a555ccc75421794ee67d4b0c298663856e | def funct(number):
    # Sum the first `number` elements of the module-level a_list, then
    # square the total.
    # NOTE(review): `sum` shadows the builtin, and the function reads the
    # global `a_list` instead of taking the sequence as a parameter.
    sum = 0
    for i in range(number):
        sum += a_list[i]
    return sum**2

a_list = [12, -7, 5, -89.4, 3, 27, 56, 57.3]
# Square of the sum of the whole list.
a = funct(len(a_list))
print(a)
11,970 | 9965ad0be248a364eb271ee8313656d0129a705f | import json
import os
drinks = {}
# Each cocktail lives in a file named '<number>.json'. Build a mapping of
# drink name -> multi-line description for text-to-text training data.
for filename in os.listdir('.'):
    if filename.endswith('.json') and filename[:-5].isnumeric():
        with open(filename) as json_file:
            drink_js = json.load(json_file)
            drink_name = drink_js['strDrink']
            drink_text = ''
            if drink_js['strAlcoholic']:
                drink_text += drink_js['strAlcoholic'] + '\n'
            drink_text += drink_js['strCategory'] + '\n'
            drink_text += drink_js['strGlass'] + '\n'
            drink_text += '\n'
            # Up to 15 measure/ingredient slots; empty slots are falsy.
            for i in range(1, 16):
                if drink_js['strIngredient{}'.format(i)]:
                    drink_text += (drink_js['strMeasure{}'.format(i)] + ' ' + drink_js['strIngredient{}'.format(i)]).strip() + '\n'
            drink_text += '\n'
            drink_text += drink_js['strInstructions']
            drinks[drink_name] = drink_text

with open('text2text_training_data.json', 'w') as drinks_json:
    drinks_json.write(json.dumps(drinks))
11,971 | 048b79cdfb664c998b14495fa700993d884c0c65 | S = sorted(list(set(list(input()))))
alfabet = ["a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z"]
count = 0
# Print the first alphabet letter absent from S (the de-duplicated,
# sorted input built on the preceding line); if every letter is present,
# the loop finishes with count == 26 and "None" is printed.
for char in alfabet:
    if not(char in S):
        print(char)
        break
    count+=1
if count == 26:
    print("None")
11,972 | bef09f75a4820d326f6102b11a3453d654558abb | def shoping_cart(shop_cart):
    """Interactive shopping-cart REPL over *shop_cart*; returns the cart."""
    cart = shop_cart  # alias: mutations are visible to the caller
    print ("wlecome to your shoping cart")
    while True:
        answer = input("What you wanna to do? \n For see all item print: items. \n For add some items print: add. \n For remove some item print: remove \n For quit print: quit \n")
        if answer.lower() == "quit":
            break
        elif answer.lower() == "items":
            for key, value in cart.items():
                print("{} ${}".format(key,value))
        elif answer.lower() == "add":
            cart[input("enter an item: ")] = input("enter item description: ")
        elif answer.lower() == "remove":
            for key, value in cart.items():
                print("{} ${}".format(key,value))
            answer = input("what item you wanna remove?: ")
            # NOTE(review): raises KeyError when the item is not in the cart.
            del cart[answer.lower()]
    ## answer = input("do you wann se your shopping cart? ")
    # if answer.lower() == "quit":
    #     break
    # elif answer.lower() == "yes":
    #     print("\n")
    #     for key, value in cart.items():
    #         print("{} ${}".format(key,value))
    #     answer = input ("do you want to remove some items from cart?")
    #     if answer.lower() == "quit":
    #         break
    #     elif answer.lower() == "yes":
    #         answer = input("what do you want to remove? ")
    #         if answer.lower() == "quit":
    #             break
    #         print(answer.lower())
    #         del cart[answer.lower()]
    #     elif answer.lower == "no":
    #         answer = input("")
    #
    return cart

#cart = {"laptop" : "2000","tv":"300"}
#shoping_cart(cart)
11,973 | 88db57f9f017099ed8aa2f0987ef3f66a39f0c77 | from datetime import datetime
from sqlalchemy import CheckConstraint
from nb2 import db
class Person(db.Model):
    """
    Represents someone that Nostalgiabot has quotes for;
    usually an SDE employee.

    Most Persons will have a slack_user_id if they still have a Slack account
    where the bot is deployed. For non-Slack users, called Ghost Person,
    a ghost_user_id is used to refer to the Person. This should be the username
    of their email. All users should have a display_name.
    """

    # A Person must have either a slack_user_id OR a ghost_user_id OR a display_name
    # Some Persons may have both if they were a slack user at some point
    # that has since been deactivated. Those deactivated Persons should be
    # assigned a ghost_user_id to keep accessing their quotes.
    __table_args__ = (
        CheckConstraint("COALESCE(slack_user_id, ghost_user_id, display_name) IS NOT NULL"),
    )

    id = db.Column(db.Integer, primary_key=True)
    slack_user_id = db.Column(
        db.String(16),
        index=True,
        unique=True,
    )
    # BUG FIX: this column was declared nullable=False, contradicting both
    # the docstring and the CheckConstraint above — a slack-only Person
    # (no ghost id) must be representable, so the column is now nullable.
    ghost_user_id = db.Column(
        db.String(16),
        index=True,
        unique=True,
    )
    first_name = db.Column(db.String(32), nullable=False)
    last_name = db.Column(db.String(32))
    display_name = db.Column(
        db.String(80),
        index=True,
    )
    quotes = db.relationship("Quote", backref="person", lazy=True)

    def __repr__(self):
        user_id = self.slack_user_id or self.ghost_user_id
        return f"<Person: {user_id} | Name: {self.first_name} | Id: {self.id}>"

    def has_said(self, quote: str) -> bool:
        """
        Check if quote already exists in Nostalgiabot's memory for this Person.

        Args:
            quote: a string representing something this Person said.

        Returns:
            True if a Quote record in the db for this Person has the same
            content as quote, compared case-insensitively. False otherwise.
        """
        return any(q for q in self.quotes if q.content.lower() == quote.lower())
class Quote(db.Model):
    """
    Represents something a Person has said;
    usually something funny.
    """
    # Surrogate primary key.
    id = db.Column(db.Integer, primary_key=True)
    # The quoted text itself; required.
    content = db.Column(db.Text, nullable=False)
    # Owning Person (see the `person` backref declared on Person.quotes).
    person_id = db.Column(db.Integer, db.ForeignKey("person.id"), nullable=False)
    # Timestamp the quote was recorded (UTC, set on insert).
    created = db.Column(db.DateTime, nullable=False, default=datetime.utcnow)
    def __repr__(self):
        return f"<Quote: {self.content} | Id: {self.id}>"
|
11,974 | 488e92adab061996dddf2347fcef030c70c7d597 | # -*- coding: cp1252 -*-
# Split a space-separated pair of numbers into a list of strings.
words = "10 34"
words2 = words.split(" ")
# Fix: `print words2` is Python-2-only syntax; the single-argument call form
# below behaves identically on both Python 2 and Python 3.
print(words2)
|
11,975 | 1a33505b173f44fa332c62b51b133564a93c2a9c |
# https://leetcode-cn.com/problems/longest-increasing-path-in-a-matrix/
# 记忆化搜索
class Solution(object):
    def longestIncreasingPath(self, matrix):
        """
        Length of the longest strictly increasing path in the matrix,
        moving up/down/left/right. Memoized DFS: each cell's best path
        length is computed once, so the whole search is O(rows*cols).
        :type matrix: List[List[int]]
        :rtype: int
        """
        if not matrix or not matrix[0]:
            return 0
        rows, cols = len(matrix), len(matrix[0])
        memo = {}
        def dfs(r, c):
            # Return the longest increasing path starting at (r, c).
            if (r, c) in memo:
                return memo[(r, c)]
            best = 1
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                nr, nc = r + dr, c + dc
                if 0 <= nr < rows and 0 <= nc < cols and matrix[nr][nc] > matrix[r][c]:
                    best = max(best, 1 + dfs(nr, nc))
            memo[(r, c)] = best
            return best
        return max(dfs(r, c) for r in range(rows) for c in range(cols))
|
11,976 | 2e6a3d6deab93a4798254a3c5e4d94bf7722a948 | # -*- coding: utf-8 -*-
# FizzBuzz for 1..100: multiples of 3 print "Fizz", multiples of 5 print
# "Buzz", multiples of both print "FizzBuzz", anything else the number.
limit = 101
for number in range(1, limit):
    word = ""
    if number % 3 == 0:
        word += "Fizz"
    if number % 5 == 0:
        word += "Buzz"
    print(word if word else number)
11,977 | b21534b7b29b48242403d7172a961f1f3058815f | #!/usr/bin/env python3
from brain_games import brain_logic
from brain_games import cli
def main():
    """Entry point: greet the user, then run the GCD ("nod") game."""
    cli.welcome_user('Find the greatest common divisor of given numbers.')
    brain_logic.nod_game()
if __name__ == '__main__':
    main()
|
11,978 | 1922703a6b41ba97e4da059530a84c8ea5623b9d | import logging
from logging import Formatter, FileHandler, StreamHandler
from os import environ
from sys import stdout
from discord.ext.commands import Bot
from dotenv import load_dotenv
# Load environment variables from a local .env file before reading them.
load_dotenv()
# Required bot token; KeyError here fails fast if the env var is missing.
DISCORD_TOKEN = environ["DISCORD_TOKEN"]
# Route the discord library's DEBUG-level logging to both a file and stdout.
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
# noinspection SpellCheckingInspection
formatter = Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s')
# File handler truncates discord.log on every start (mode='w').
handler = FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(formatter)
logger.addHandler(handler)
# Mirror the same records to stdout.
handler = StreamHandler(stream=stdout)
handler.setFormatter(formatter)
logger.addHandler(handler)
bot = Bot(command_prefix="!")
@bot.event
async def on_ready():
    # NOTE(review): awaiting load_extension implies discord.py 2.x, where
    # extensions are usually loaded in setup_hook instead of on_ready (which
    # can fire more than once) -- confirm against the installed version.
    logger.info(f"bot is ready ----------")
    await bot.load_extension("cogs.Manager")
# Blocks until the bot shuts down.
bot.run(DISCORD_TOKEN)
|
11,979 | 0ee126ac867995d652a892accc279c118763e837 | from .encoder import Encoder
class KeywordExtractor:
    """Thin callable facade over :class:`Encoder`.

    Calling an instance forwards all positional and keyword arguments
    straight to the wrapped encoder and returns its result unchanged.
    """
    def __init__(self):
        # All extraction work is delegated to this encoder instance.
        self.enc = Encoder()
    def __call__(self, *args, **kwargs):
        encoder = self.enc
        return encoder(*args, **kwargs)
|
11,980 | 9befbccdde0758cc6ef0c35a2471ad4f5e47da73 | from antlr4.tree.Tree import ParseTree
from parser.PlSqlParser import PlSqlParser
from .visitor import Visitor
from .ast.hie_query import HierarchicalQueryNode
from .ast.token import TokenNode
from .ast.table_ref import TableRefNode
from .ast.selected_item import SelectedItemNode
from .ast.dot_id import DotIdNode
from .ast.relational_expression import RelationalExpressionNode
from .ast.prior_expression import PriorExpressionNode
class Translator(object):
    """
    Node translator (ORA -> PG): converts Oracle PL/SQL parse-tree contexts
    into target AST nodes, dispatching through the companion Visitor.
    """
    def __init__(self):
        self.visitor = Visitor(translator=self)
    def translate(self, tree: ParseTree):
        """Translate a whole parse tree (public entry point)."""
        return self.visit(tree)
    def visit(self, ctx: ParseTree):
        """Dispatch a single parse-tree node to the visitor."""
        return self.visitor.visit(ctx)
    def translate_hierarchical_query(self, ctx: PlSqlParser.Query_blockContext):
        """Translate an Oracle hierarchical query block
        (SELECT ... START WITH ... CONNECT BY ...) into a
        HierarchicalQueryNode."""
        result = HierarchicalQueryNode()
        # process the selected elements
        if ctx.selected_list().select_list_elements():
            # an explicit list of fields was given
            for selected_element in ctx.selected_list().select_list_elements():
                result.selected_elements.append(self.visit(selected_element))
        else:
            # SELECT * FROM
            result.selected_elements.append(TokenNode("*"))
        # process the FROM clause
        for table_ref_ctx in ctx.from_clause().table_ref_list().table_ref():
            result.table_refs.append(self.visit(table_ref_ctx))
        # START WITH part of the hierarchical query (optional)
        start_part_ctx = ctx.hierarchical_query_clause().start_part() if ctx.hierarchical_query_clause() else None
        result.start_part = self.visit(start_part_ctx.condition()) if start_part_ctx else None
        # recursive (CONNECT BY) condition of the hierarchical query (optional)
        hie_condition_ctx = ctx.hierarchical_query_clause().condition() if ctx.hierarchical_query_clause() else None
        result.hie_condition = self.visit(hie_condition_ctx) if hie_condition_ctx else None
        return result
    def translate_selected_list_elements(self, ctx: PlSqlParser.Select_list_elementsContext):
        """Convert the select_list_elements rule into a SelectedItemNode."""
        if ctx.ASTERISK() is not None:
            return SelectedItemNode(name="*", table=self.visit(ctx.table_view_name()))
        alias = None
        if ctx.column_alias():
            # the alias is either a plain identifier or a quoted string
            alias = self.visit(
                ctx=ctx.column_alias().identifier() if ctx.column_alias().identifier() else \
                ctx.column_alias().quoted_string()
            ).get_text()
        table = None
        name = None
        name_node = self.visit(ctx.expression())
        if isinstance(name_node, DotIdNode):
            # qualified reference: table.column
            table, name = name_node.left, name_node.right
        else:
            name = name_node.get_text()
        return SelectedItemNode(table=table, name=name, alias=alias)
    def translate_dot_id(self, ctx: PlSqlParser.General_element_partContext):
        """Translate a dotted reference (left.right) into a DotIdNode."""
        return DotIdNode(
            left=ctx.id_expression()[0].getText(),
            right=ctx.id_expression()[1].getText()
        )
    def translate_table_ref_aux(self, ctx: PlSqlParser.Table_ref_auxContext):
        """Translate a table reference together with its optional alias."""
        return TableRefNode(
            name=ctx.table_ref_aux_internal().getText(),
            alias=ctx.table_alias().getText() if ctx.table_alias() else None
        )
    def translate_relational_expression(self, ctx: PlSqlParser.Relational_expressionContext):
        """Translate a binary relational expression (left <op> right)."""
        return RelationalExpressionNode(
            left=self.visit(ctx.relational_expression()[0]),
            op=ctx.relational_operator().getText(),
            right=self.visit(ctx.relational_expression()[1])
        )
    def translate_prior_expression(self, ctx: PlSqlParser.Unary_expressionContext):
        """Translate Oracle's PRIOR <expr> (used inside CONNECT BY)."""
        return PriorExpressionNode(self.visit(ctx.unary_expression()))
|
11,981 | 31cdc6e37ddc376b5d7cad27da985e04b3c235dc | #!/usr/bin/python3.1
from Graph import *
from LettersGenerator import *
from Dictionary import *
class Solver:
    """Solves the word-grid game (translated from the original French):
    self.graph = the graph representing all cells of the board,
    self.d = a Dictionary object,
    self.dico = the dictionary trie,
    self.lettersList = the list of letters on the board,
    self.solution = the list of solutions found by the solver."""
    def __init__(self, graphParent, dicoParent, lettersList, solver):
        self.g = graphParent
        self.graph = self.g.graph
        self.d = dicoParent
        self.dico = dicoParent.trie
        self.lettersList = lettersList
        self.solver = solver
        # `solver` selects the strategy: 0 = board-first DFS, 1 = trie-first DFS.
        self.solution = self.setSolverType(solver)
        print(self.wordsCounter(self.solution), "words found.")
    def setSolverType(self, solver):
        """Run the solver strategy selected by `solver` and return its words."""
        # NOTE(review): mixes the local `solver` argument and `self.solver`;
        # they hold the same value here, but one should be used consistently.
        if solver == 0:
            return self.solver_init()
        elif self.solver == 1:
            return self.solver2_init()
        else :
            print("xxx","solver execution error.")
    def solver_init(self):
        """Strategy 0: depth-first walk over the board starting from every cell."""
        wordsFound = []
        colors = ["white" for e in self.lettersList]
        positions = [(i,e) for i,e in enumerate(self.lettersList)]
        for e in positions:
            self.solver_rec([], e, wordsFound, colors)
        res = set(wordsFound)
        res = self.decreasingList(res)
        return res
    def solver_rec(self, parcours, position, ensWordsFound, colors):
        """DFS step. `parcours` is the path of (index, letter) cells selected
        so far, `position` the cell being visited, `ensWordsFound` the
        accumulator of found words, and `colors` marks visited cells
        ("white" = unvisited, "black" = already on the current path)."""
        wordPotentiel = self.getWord(parcours + [position])
        if self.d.isInTree( wordPotentiel) :
            ensWordsFound.append(wordPotentiel)
        if not self.d.prefixIsInTree(wordPotentiel):
            # no dictionary word starts with this prefix: prune the branch
            return
        else :
            numCase = position[0]
            newParcours = list(parcours)
            newParcours.append((numCase, self.lettersList[numCase]))
            newColors = list(colors)
            newColors[numCase] = "black"
            for neighbour in self.graph[numCase]:
                if newColors[neighbour]=="white":
                    newPosition = (neighbour, self.lettersList[neighbour]);
                    self.solver_rec(newParcours, newPosition, ensWordsFound, newColors)
    def solver2_init(self):
        """Strategy 1: walk the dictionary trie and match it against the board."""
        result = []
        colors = ["white" for e in self.lettersList]
        for letter in self.dico:
            #print(letter)
            self.solver2_rec([], letter, result, colors, self.dico)
        res = set(result)
        res = self.decreasingList(res)
        return res
    def solver2_rec(self, prefix, letter, result, colors, dico_current):
        """Trie-driven DFS: try to place `letter` on every board cell that
        neighbours the last cell of `prefix`, collecting complete words
        (marked with the '_NULL_' sentinel in the trie) into `result`."""
        emplacements = self.g.isInGraph(letter)
        #print(emplacements)
        if emplacements==[]:
            # letter does not appear on the board: prune
            return
        else :
            for emplacement in emplacements :
                #print("->", emplacement)
                if prefix ==[] or self.g.isNeighbour(emplacement, prefix[-1][0]):
                    # cell is adjacent to the previously selected cell
                    if colors[emplacement] == "white":
                        #print(colors[emplacement])
                        wordPotentiel = self.getWord(prefix + [(emplacement,letter)])
                        if '_NULL_' in dico_current[letter]:
                            # sentinel: the current prefix is a complete word
                            result.append(wordPotentiel)
                        if dico_current[letter] != {'_NULL_':'_NULL_'}:
                            new_dico_current = dico_current[letter]
                            newColors = list(colors)
                            newColors[emplacement] = "black"
                            for newLetter in new_dico_current:
                                self.solver2_rec(prefix + [(emplacement, letter)], newLetter, result, newColors, new_dico_current)
    def getWord(self, list_couple):
        """Return the lowercase string spelled by the selected cells."""
        if list_couple == []:
            return ""
        return ''.join([e[1].lower() for e in list_couple])
    def decreasingList(self, words):
        """Return the words grouped and ordered by length.
        NOTE(review): despite the name, the final reverse() is commented out,
        so the result is in ASCENDING length order."""
        dic = {}
        words = self.d.lowerList(words)
        for word in words:
            if not len(word) in dic:
                dic[len(word)] = []
            dic[len(word)].append(word)
        dic = dic.items()
        dic = sorted(dic)
        #dic.reverse()
        return [f for e in dic for f in e[1]]
    def wordsCounter(self, myList):
        """Count the items in myList (0 when myList is None)."""
        res = 0
        if myList != None:
            for e in myList:
                res +=1
        return res
|
11,982 | 569908cb7bf1513404a057c81c3c94e3c8f2241e | from database import connect
import uuid as uuidpkg
import psycopg2.extras
class CollectedDataPoint:
    """A single collected metric value, persisted in the
    collectedDataPoints table via psycopg2."""
    def __init__(self, variableHandle=None, timeFrame=None, value=None, organization_uuid=None, uuid=None,
                 interaction_uuid=None, variable_uuid=None):
        self.variableHandle = variableHandle
        self.timeFrame = timeFrame
        self.value = value
        self.organization_uuid = organization_uuid
        # Generate a fresh primary key when none is supplied.
        self.uuid = uuidpkg.uuid4() if uuid is None else uuid
        self.interaction_uuid = interaction_uuid
        self.variable_uuid = variable_uuid
        # Lets psycopg2 adapt Python UUID objects in queries.
        # NOTE(review): this global registration re-runs on every
        # instantiation; it could live at module import time instead.
        psycopg2.extras.register_uuid()
    def addToDB(self):
        """Insert this data point as a new row (parameterized query)."""
        with connect() as conn:
            with conn.cursor() as cursor:
                cursor.execute(
                    'INSERT INTO collectedDataPoints (variableHandle, timeFrame, value, organization_uuid, uuid, '
                    'interaction_uuid, variable_uuid) VALUES (%s, %s, %s, %s, %s, %s, %s)',
                    (self.variableHandle, self.timeFrame, self.value, self.organization_uuid, self.uuid,
                     self.interaction_uuid, self.variable_uuid))
    @classmethod
    def getByUUID(cls, uuid):
        """Load one data point by primary key.
        NOTE(review): assumes the SELECT * column order starts with an extra
        column at row[0], and that a matching row exists (fetchone() returning
        None would raise TypeError) -- confirm against the table schema."""
        with connect() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SELECT * FROM collectedDataPoints WHERE uuid=%s', (uuid,))
                row = cursor.fetchone()
                return cls(row[1], row[2], row[3], row[4], row[5], row[6], row[7])
|
11,983 | c936b9cc1aaf34f4f77f9652b90fafe85476aec7 |
import copy
from collections import OrderedDict
import ast
from utils import BindingDict
from fields import CharField, Field, IntegerField
from orm_wrapper import DemoPicker
from validators import ValidationError
# get all fields
class SerializerMetaclass(type):
    """Metaclass that collects Field attributes declared on a serializer
    class into `_declared_fields` (an OrderedDict), merging in fields
    declared on ancestor classes."""
    def __new__(cls, name, bases, attrs):
        # Pull Field instances out of the class namespace (pop so they do
        # not remain as plain class attributes).
        fields = [
            (field_name, attrs.pop(field_name))
            for field_name, obj in list(attrs.items())
            if isinstance(obj, Field)
        ]
        attrs['_declared_fields'] = OrderedDict(fields)
        newclass = super(SerializerMetaclass, cls).__new__(cls, name, bases, attrs)
        # Walk the MRO in reverse so the nearest class updates last and
        # therefore overrides fields inherited from its bases.
        declared_fields = OrderedDict()
        for base in reversed(newclass.__mro__):
            if hasattr(base, '_declared_fields'):
                declared_fields.update(base._declared_fields)
        newclass._declared_fields = declared_fields
        return newclass
# derived from django-rest-framework
class BaseSerializer(Field):
    """Base serializer: lazily builds a BindingDict of this serializer's
    fields and provides dict-style access to individual fields."""
    def __init__(self, *args, **kwargs):
        super(BaseSerializer, self).__init__(*args, **kwargs)
    @property
    def fields(self):
        # Built lazily on first access and cached on the instance.
        if not hasattr(self, '_fields'):
            self._fields = BindingDict(self)
            for key, value in self.get_fields().items():
                self._fields[key] = value
        return self._fields
    def get_fields(self):
        """Return a deep copy of the class-level declared fields so each
        instance can hold its own independent field objects."""
        return copy.deepcopy(self._declared_fields)
    def __getitem__(self, name):
        """Return a bind with the given name."""
        try:
            # Existence check only; raises KeyError when the name is unknown.
            field = self.fields[name]
        except KeyError:
            raise KeyError(
                "Key '%s' not found in '%s'. Choices are: %s." % (
                    name,
                    self.__class__.__name__,
                    ', '.join(sorted(f for f in self.fields)),
                )
            )
        return self._fields[name]
class Serializer(BaseSerializer, metaclass=SerializerMetaclass):
    """Serializer over dict-like records fetched through a `picker` backend.

    The metaclass gathers declared Field attributes into `self.fields`;
    instances validate incoming data against those fields and read/write
    records through the picker.
    """
    def __init__(self, picker, *args, **kwargs):
        self.picker = picker
        self.errors = []
        self.validated_data = []
        BaseSerializer.__init__(self, *args, **kwargs)
    def get(self, condition=None, ignore_fields=None):
        """Fetch records (all, or filtered by `condition`) and serialize them.

        Fix: the defaults used to be the mutable literals `{}` and `[]`,
        which are shared across all calls; replaced with None sentinels.
        """
        ignore_fields = [] if ignore_fields is None else ignore_fields
        if not condition:
            res = self.picker.all()
        else:
            res = self.picker.filter(condition)
        return self.to_representation(res, ignore_fields)
    def save(self, data):
        """Hook for subclasses; the base implementation does nothing."""
        pass
    def is_valid(self, data):
        """Run every declared field's validators against `data`.

        On success stores `data` in self.validated_data and returns True;
        on failure stores the messages in self.errors and returns False.
        """
        errors = []
        for k, v in self.fields.items():
            try:
                v.run_validators(data[k])
            except ValidationError as e:
                errors.append(e.msg)
        if not errors:
            self.validated_data = data
            return True
        else:
            self.errors = errors
            return False
    def create(self):
        """Persist the previously validated data through the picker."""
        self.picker.save(self.validated_data)
    def to_representation(self, res, ignore_fields):
        """Serialize each record, skipping the keys in ignore_fields."""
        newdata = []
        for ires in res:
            tmp = {
                k: self.fields[k].to_representation(ires[k])
                for k in set(self.fields.keys()) - set(ignore_fields)
            }
            newdata.append(tmp)
        return newdata
class DCSerializer(Serializer):
    """Serializer for ORM objects: snake_case field names internally,
    camelCase keys in the external (JSON) representation."""
    def __init__(self, picker, *args, **kwargs):
        Serializer.__init__(self, picker, *args, **kwargs)
    def to_representation(self, res, ignore_fields):
        """Serialize ORM objects (attribute access) into dicts whose keys
        are the camelCase forms of the field names."""
        newdata = []
        for ires in res:
            tmp = {
                self.underline_to_camel(k): self.fields[k].to_representation(getattr(ires, k))
                for k in set(self.fields.keys()) - set(ignore_fields)
            }
            newdata.append(tmp)
        return newdata
    def is_valid(self, data):
        """Validate one camelCase-keyed record.

        Appends the record (with snake_case keys) to self.validated_data on
        success; raises ValidationError with the collected messages
        otherwise. Fields absent from `data` are skipped -- presence of
        required fields is checked separately in validate().
        """
        errors = []
        validated_data = {}
        for fieldname, v in self.fields.items():
            k = self.underline_to_camel(fieldname)
            if k in data.keys():
                value = data[k]
            else:
                continue
            try:
                v.run_validators(value)
                validated_data[fieldname] = value
            except ValidationError as e:
                errors.append(e.msg)
        if not errors:
            self.validated_data.append(validated_data)
            return True
        else:
            raise ValidationError(errors)
    def validate(self, data):
        """Check that every required field is present in `data`; raise
        ValidationError listing all missing ones."""
        errors = []
        for fieldname, v in self.fields.items():
            k = self.underline_to_camel(fieldname)
            if k not in data.keys() and v.required:
                errors.append({k: v.error_messages['required']})
        if errors:
            raise ValidationError(errors)
    def is_valid_all(self, data):
        """Validate a list of records, accumulating every error into
        self.errors. Returns True only when no record produced errors."""
        for idata in data:
            try:
                self.validate(idata)
                self.is_valid(idata)
            except ValidationError as e:
                self.errors.extend(e.msg)
        if not self.errors:
            return True
        else:
            return False
    def create(self):
        """Insert every validated record, dropping any client-supplied id
        and starting the optimistic-lock version counter at 0."""
        for idata in self.validated_data:
            if 'id' in idata.keys():
                idata.pop('id')
            idata['version'] = 0
            self.picker.save(idata)
    def update(self):
        """Update existing records by id, bumping the version counter."""
        for idata in self.validated_data:
            if 'id' not in idata.keys():
                continue
            res = self.picker.get({'id': idata['id']})
            res.version += 1
            for k in idata.keys():
                # Fix: the original guard was `k != 'id' or k != 'version'`,
                # which is always True (every k differs from at least one of
                # the two), so id/version were never actually excluded.
                if k not in ('id', 'version'):
                    setattr(res, k, idata[k])
            res.save()
    def delete(self, idlist):
        """Delete records by id; `idlist` may be a list or its string
        representation (parsed safely with ast.literal_eval)."""
        if isinstance(idlist, str):
            idlist = ast.literal_eval(idlist)
        errors = self.picker.delete(idlist)
        return errors
    @classmethod
    def underline_to_camel(cls, snake_str):
        """Convert snake_case to camelCase; strings without an underscore
        are returned unchanged."""
        components = snake_str.split('_')
        if '_' not in snake_str:
            return snake_str
        return components[0] + "".join(x.title() for x in components[1:])
    @classmethod
    def camel_to_underline(cls, field_name):
        """Convert camelCase to lower snake_case, e.g.:
        Role > role
        roleName > role_name
        RoleName > role_name
        _RoleName > __role_name
        role_name > role_name
        """
        return ''.join(
            [c.lower() if not c.isalpha() or c.islower() or (c.isupper() and idx == 0)
             else '_%c' % c.lower()
             for idx, c in enumerate(field_name)])
class FooSerializer(Serializer):
    """Minimal concrete serializer for the __main__ demo: id + name."""
    id = IntegerField()
    name = CharField()
if __name__ == '__main__':
    # Demo: turn rows of (id, name) tuples into dicts, serialize without the
    # id field, then validate and save the first record.
    data = [(1, 'a'), (2, 'b'), (3,'c')]
    newdata = []
    for idata in data:
        tmp = {k:v for k, v in zip(('id', 'name'), idata)}
        newdata.append(tmp)
    dp = DemoPicker()
    foo = FooSerializer(dp)
    try:
        foo.get(ignore_fields=['id'])
        if foo.is_valid(newdata[0]):
            foo.create()
    except ValidationError:
        print(foo.errors)
|
11,984 | 5f565966c94c7e6fc00040cd897a542c92a93da7 | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class ScrapyBloomfilterItem(scrapy.Item):
    """Placeholder item kept from the project template; defines no fields."""
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class NewsItem(scrapy.Item):
    """A scraped news article."""
    # URL of the original article.
    source_url = scrapy.Field()
    # Thumbnail image URL.
    thumb = scrapy.Field()
    title = scrapy.Field()
    author = scrapy.Field()
    # Publication time as scraped (format depends on the spider).
    release_time = scrapy.Field()
    content = scrapy.Field()
    # URLs of images embedded in the article body.
    inner_imgs = scrapy.Field()
|
11,985 | 188913192a16316ad9344d34e35ceb571ff9126b | from django.shortcuts import render, redirect
from django.contrib.auth.forms import UserCreationForm,AuthenticationForm
from django.contrib import messages
from django.contrib.auth import login
from django.contrib.auth import logout
from django.contrib.auth import authenticate
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from .decorators import unauthenticatedUser, allowedUsers
# Create your views here.
def index(request):
    """Render the public landing page."""
    return render(request,'main/index.html',context={})
@unauthenticatedUser
def loginView(request):
    """Log a user in via Django's AuthenticationForm.

    POST: validate credentials; on success log the user in and redirect
    to the home page. GET (or a failed POST): render the login page.
    """
    if request.method == "POST":
        form = AuthenticationForm(request = request, data = request.POST)
        if form.is_valid():
            username = form.cleaned_data.get('username')
            password = form.cleaned_data.get('password')
            user = authenticate(username=username,password=password)
            if user is not None:
                login(request,user)
                messages.info(request,f"You are now logged in as {username}")
                return redirect('/')
            else:
                messages.error(request, 'Invalid Username or Password.')
        else:
            messages.error(request, "Invalid username or password.")
    else:
        # Fix: only build a fresh unbound form for GET requests. The original
        # rebuilt it after every failed POST as well, discarding the bound
        # form's field errors before the template was rendered.
        form = AuthenticationForm()
    #returns the login page
    context = {'form':form}
    return render(request,'main/login.html',context)
def signupView(request):
    """Render the signup page (no form handling implemented yet)."""
    return render(request,'main/signup.html',context={})
def postRequest(request):
    """Render the land-request page (no form handling implemented yet)."""
    return render(request,"main/landRequest.html",context={})
11,986 | cd54a5bceccd088e322097382cb8445988f65664 | #!usr/bin/python
# -*- coding:utf-8 -*-
from common.Log import *
from Server import *
from common.DeviceManager import *
import threading
class ServerManager:
    """Manages one Server per registered test device, each running in its
    own thread. NOTE: Python 2 code (dict.iteritems)."""
    def __init__(self):
        # Shared device registry (class attribute on DeviceManager).
        self.testdevices = DeviceManager.testdevices
        self.serverobjects = []
        # NOTE(review): never populated -- started threads are not tracked,
        # so they cannot be joined later.
        self.threads = []
    def start_all_server(self):
        """Create and start a Server in a new thread for every device."""
        for deviceid,device in self.testdevices.iteritems():
            server = Server(device)
            self.serverobjects.append(server)
            thread1 = threading.Thread(target=server.start)
            thread1.start()
    def stop_all_server(self):
        """Stop every server previously started."""
        for server in self.serverobjects:
            server.stop()
    def list_devices(self):
        """Print the connected devices of each server.
        NOTE(review): builds throwaway Server objects instead of reusing
        self.serverobjects -- confirm this is intended."""
        for deviceid,device in self.testdevices.iteritems():
            server = Server(device)
            server.list_connect_devices()
|
11,987 | 0c84be7c990cfc3fe0454065baaf2c5ca8c6517d | from django.conf import settings
# PayPal subscription settings, all overridable from Django settings.
PAYPAL_SUBS_CLIENT_ID = getattr(settings, "PAYPAL_SUBS_CLIENT_ID", None)
PAYPAL_SUBS_SECRET = getattr(settings, "PAYPAL_SUBS_SECRET", None)
PAYPAL_SUBS_LIVEMODE = getattr(settings, "PAYPAL_SUBS_LIVEMODE", False) # or 'live'
PAYPAL_SUBS_WEBHOOK_ID = getattr(settings, 'PAYPAL_SUBS_WEBHOOK_ID', None)
PAYPAL_SUBS_API_BASE_URL_SANDBOX = getattr(settings, 'PAYPAL_SUBS_API_BASE_URL_SANDBOX',
                                           'https://api.sandbox.paypal.com')
PAYPAL_SUBS_API_BASE_URL_LIVE = getattr(settings, 'PAYPAL_SUBS_API_BASE_URL_LIVE',
                                        'https://api.paypal.com')
# Fix: fall back to sandbox for ANY value other than True. The original
# `elif ... is False` left PAYPAL_SUBS_API_BASE_URL and
# PAYPAL_SUBS_LIVEMODE_STRING undefined (NameError below) whenever the
# setting was a truthy non-bool such as the string 'live'.
if PAYPAL_SUBS_LIVEMODE is True:
    PAYPAL_SUBS_API_BASE_URL = PAYPAL_SUBS_API_BASE_URL_LIVE
    PAYPAL_SUBS_LIVEMODE_STRING = 'live'
else:
    PAYPAL_SUBS_API_BASE_URL = PAYPAL_SUBS_API_BASE_URL_SANDBOX
    PAYPAL_SUBS_LIVEMODE_STRING = 'sandbox'
# Credentials bundle handed to the PayPal SDK.
PAYPAL_SETTINGS = {
    "mode": PAYPAL_SUBS_LIVEMODE_STRING,
    "client_id": PAYPAL_SUBS_CLIENT_ID,
    "client_secret": PAYPAL_SUBS_SECRET,
}
# Configure the SDK only when credentials are actually provided.
if PAYPAL_SUBS_CLIENT_ID and PAYPAL_SUBS_SECRET:
    from paypalrestsdk import configure
    configure(PAYPAL_SETTINGS)
|
11,988 | 5db528bd854d0397587a6243382f95e0e4d65aeb | __version__ = '0.1'
# Default AppConfig path used by Django (<3.2) application auto-discovery.
default_app_config = 'jsonmirror.apps.JsonmirrorConfig'
11,989 | bc404bb8b60ef8bea21f3de815e6d0206aacb3d3 | # Generated by Django 3.2.5 on 2021-09-07 22:37
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated initial migration (Django 3.2.5): creates the User
    and Quote tables. Applied migrations should not be hand-edited."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('last_name', models.CharField(max_length=100)),
                ('email', models.EmailField(max_length=255, unique=True)),
                ('role', models.CharField(choices=[('user', 'User'), ('admin', 'Admin')], max_length=255)),
                ('password', models.CharField(max_length=70)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
            ],
        ),
        migrations.CreateModel(
            name='Quote',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('autor', models.CharField(max_length=50)),
                ('q_cont', models.TextField(max_length=300)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('creador', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='quotes', to='app_examen.user')),
                ('like', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='likes', to='app_examen.user')),
            ],
        ),
    ]
|
11,990 | 2391cda134deb737b587bee94b995db5f71bac89 | #!/usr/bin/env python
#
# check percentage of available DAOD events that were used in a production by comparing DxAOD yields (from CxAOD files)
# with the AMI DxAOD yields
# Note on yields files format:
# yields file where 3rd column is number of DxAOD input events made using count_Nentry_SumOfWeight.py and countNumberInDxAOD=True
#
from ConfigParser import ConfigParser
from ConfigParser import Error as ConfigError
import argparse
# NOTE: Python 2 script (print statements throughout).
# Compares DxAOD yields counted from CxAOD files against the AMI DxAOD
# yields and writes, per dataset, the fraction of AMI events that were used.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-y', '--yieldsfile', help='CxAOD DAOD yields file', default='dslist_NevtDxAOD_HIGG5D2_e.txt')
    parser.add_argument('-a', '--amifile', help='AMI DxAOD yields', default='dslist_NevtDxAOD_HIGG5D2_e.txt')
    parser.add_argument('-o', '--outputfile', help='output file', default='dslist_NevtDxAOD_CxAODfraction.txt')
    parser.add_argument('-d', '--debug', action='store_true',help='debugging mode')
    args = parser.parse_args()
    print parser.parse_args()
    # inputs
    yields_file_name=args.yieldsfile
    in_file_name =args.amifile
    out_file_name = args.outputfile
    #folder_out=args.outputFolder
    debug=args.debug
    print "Input file (DxAOD yields from CxAODs):", yields_file_name
    print "Input file (DxAOD events from AMI):", in_file_name
    print "Running ..."
    # NOTE(review): an open failure is only printed; the later reads would
    # then raise NameError on the undefined file handle.
    try:
        yields_file = open(yields_file_name, 'r')
    except IOError:
        print 'could not open', yields_file_name
    try:
        in_file = open(in_file_name, 'r')
    except IOError:
        print 'could not open', in_file_name
    out_file = open(out_file_name, 'w')
    # loop over lines from yield file made to compare with AMI and fill yields array
    yields_arr=[]
    if debug:
        print "Loop over lines from yieldsfile"
    for yields in yields_file :
        p = yields.split()
        if debug:
            print "list",p
        if len(p)!=5:
            print "row",p,"from yield files doesn't have 5 elements. Will ABORT!!!"
            assert(False)
        # datasetid, nDxAOD, AMItags
        yields_arr.append([p[0],p[2],p[4]])
    # done loop over lines in yields files for comparing with AMI
    # loop over lines from file with AMI info and fill DAOD array
    daod_arr=[]
    if debug:
        print "Loop over lines from AMI file"
    for lines in in_file:
        p = lines.split()
        if debug:
            print "list",p
        if len(p)!=4:
            print "row",p,"from file of AMI info does not have 4 elements. Will ABORT!!!"
            assert(False)
        # dsid, nDAOD, AMITag, name
        daod_arr.append([p[0],p[1],p[2],p[3]])
    # done loop over lines from file with AMI info
    if debug:
        print "Loop over yields:"
        for yields in yields_arr:
            print "yields",yields
        print "Loop over daod:"
        for daod in daod_arr:
            print "daod",daod
        print ""
    # loop over the AMI values and see which fraction you have in the derivation
    # but if you have others that are not in AMI it will be flagged below
    nomatch_arr=[] # to avoid listing twice
    for daod in daod_arr:
        # print '', daod[0]
        match=False
        for yields in yields_arr:
            if debug:
                print "compare",yields[0],"with",daod[0],"and",yields[2],"with",daod[2],"value",yields[1],"with",daod[1]
            # a dataset matches on both its id and its full AMI tag string
            matchDSID =(yields[0]==daod[0])
            matchAMITags=(yields[2]==daod[2])
            if matchDSID and matchAMITags:
                match=True
                if match and debug:
                    print 'match',yields[0],"with",daod[0],"and",yields[2],"with",daod[2],"value",yields[1],"with",daod[1]
                break
            # done if
        # done loop over line in yield files
        if not match:
            # add to the list of not found
            if daod[0] not in nomatch_arr:
                nomatch_arr.append(daod[0])
                print 'No match found for dataset ', daod[0], daod[2]
            # create a dummy value of yields with zero yield
            yields=[daod[0],0.0,daod[2]]
        # done if
        # compute the current and total percent
        rat=float(yields[1])/float(daod[1])
        if debug:
            print "yield",yields[1],"ami",daod[1],"ratio",rat
        line="%-10s %-8.3f %-60s %-60s" % (yields[0], rat, yields[2], daod[3])
        out_file.write(line+"\n")
    # done loop over the AMI info file
    print ""
    print "None AMI :"
    # if you have others that are not in AMI it will be studied here
    # loop over the CxAOD yields
    for yields in yields_arr:
        match=False
        for daod in daod_arr:
            if debug:
                print "compare",yields[0],"with",daod[0],"and",yields[2],"with",daod[2],"value",yields[1],"with",daod[1]
            matchDSID =(yields[0]==daod[0])
            matchAMITags=(yields[2]==daod[2])
            if matchDSID and matchAMITags:
                match=True
                if match and debug:
                    print 'match',yields[0],"with",daod[0],"and",yields[2],"with",daod[2],"value",yields[1],"with",daod[1]
                break
            # done if
        # done loop over line in yield files
        if not match:
            print 'No match found for dataset ', yields[0], yields[2]
    #
    yields_file.close()
    out_file.close()
    in_file.close()
    print ""
    print ""
    print "Done all in checkForExtensions.py"
|
11,991 | fe801d6dc871e6345a178dab72c2ee8bd35c9535 | #!/usr/bin/env python
"""
Representations for a set of inputs.
"""
from ag_frame.algorithms.representations import base
from ag_frame.algorithms import utils
class BitArray(base.BaseRepresentation):
    """Bit-string representation for a set of real-valued arguments."""
    # If this is a combinatoric representation
    COMBINATORIC = False
    # The name of the array
    _name = "bit_array"
    # pragma pylint: disable=unused-argument
    def __init__(self, nr_args, precision):
        """Initialize the representation.

        :param nr_args: The number of arguments.
        :param precision: Precision each variable is encoded with.
        """
        super(BitArray, self).__init__(nr_args)
        self._precision = precision
    def add_domains_restriction(self, domain_restriction):
        """Add the domain restrictions and derive per-variable bit sizes."""
        self._domain_restricion = domain_restriction
        self._size_var = self._get_size_var()
        self._nr_of_bits = self._get_nr_of_bits()
    @classmethod
    def is_implemented(cls):
        """Check if the class is final (a usable, concrete representation)."""
        return True
    def _get_size_var(self):
        """Return the number of bits needed for every variable."""
        size_var = []
        for index in range(self._nr_args):
            restriction = self._domain_restricion[index]
            size_var.append(utils.get_nr_bits(restriction, self._precision))
        return size_var
    def _get_nr_of_bits(self):
        """Get the total number of bits needed for one item."""
        return sum(self._size_var)
    def get_random(self):
        """Get a random genome (a random bit string of the full width)."""
        base_genom = "1" * sum(self._size_var)
        return utils.randomise_a_string(base_genom)
    def encode(self, args):
        """Encode the argument values into one concatenated bit string."""
        to_return = ""
        for index, arg in enumerate(args):
            nr_bits = utils.get_nr_bits(self._domain_restricion[index],
                                        self._precision)
            domain_lenght = (self._domain_restricion[index][1]
                             - self._domain_restricion[index][0])
            num = (arg * (2 ** nr_bits - 1) -
                   self._domain_restricion[index][0])
            bit = int((num / domain_lenght))
            str_bit = "{0:b}".format(bit)
            # Fix: str.zfill() takes the TOTAL target width, not the number of
            # zeros to add. The previous zfill(nr_bits - len(str_bit)) padded
            # to a width smaller than nr_bits, i.e. it never padded at all,
            # producing variable-length chunks that corrupt decode().
            str_bit = str_bit.zfill(nr_bits)
            to_return = to_return + str_bit
        return to_return
    def decode(self, args):
        """Decode a bit string back into the argument values."""
        return utils.string_to_args(args, self._size_var,
                                    self._domain_restricion,
                                    self._precision)
|
11,992 | 4ee8b52e71426cb9937118ae4052e2ac15124d47 | import abc
from abc import ABC, abstractmethod
# Connection Interface
class connection(ABC):
    """Abstract interface for a data connection (database, API, ...)."""
    @abc.abstractmethod
    def getData(self):
        pass
    @abc.abstractmethod
    def setData(self):
        pass
#DB Class
class DatabaseService(connection):
    """Database-backed implementation of the connection interface."""
    # NOTE(review): declared as classmethods but the first parameter is
    # named `self`; it actually receives the class object.
    @classmethod
    def getData(self):
        print("Getting Data From DB")
    @classmethod
    def setData(self):
        print("Setting Data From DB")
#API Class
class APIService(connection):
    """API-backed implementation of the connection interface."""
    # NOTE(review): declared as classmethods but the first parameter is
    # named `self`; it actually receives the class object.
    @classmethod
    def getData(self):
        print("Getting Data From API")
    @classmethod
    def setData(self):
        print("Setting Data From API")
#Test
# Module-level smoke test: exercises both implementations on import.
DB = DatabaseService()
DB.getData()
DB.setData()
API = APIService()
API.getData()
API.setData()
|
11,993 | 19572b18b148c8edb76cc6683fe58de99de64abf | #Author - Kaustabh Singh
# For each test case, count ordered cell pairs (x1,y1) and (x2,y2) with
# x2 >= x1 and y2 >= y1 where M[x1][y1] > M[x2][y2].
# Brute force: O(s^4) comparisons per s x s matrix.
t=int(input())
for i in range(t): #for test cases
    a=list()
    s=int(input()) #for matrix size
    for j in range(s):
        inp = list(map(int,input().split())) #taking input for each row
        a.append(inp)
    c=0
    for j in range(s):
        for k in range(s):
            l=a[j][k] #calculation of M[x1][y1]
            for y in range(j,s):
                for z in range(k,s):
                    if (l>a[y][z]): #checking M[x1][y1] is greater than M[x2][y2] or not
                        c=c+1 # storing count
    print(c)
|
11,994 | 75d61344a651de48913a941c4a0a269052545d9e | import torch
from torch import nn
from torch.distributions.categorical import Categorical
from torch.nn import functional as F
import math
import torch
from resnet import ResNetEncoder
class Policy(nn.Module):
    """Actor network for a Halite-style game: embeds the map, per-entity
    features and per-player scalars, mixes them with a transformer encoder,
    and emits a per-entity action distribution."""
    def __init__(self, env_config, num_players=2, max_entities=10):
        super(Policy, self).__init__()
        # State Parser
        # NOTE(review): max_entities parameter is ignored; 10 is hard-coded
        # both here and in MAX_ENTITIES -- confirm whether it should be used.
        self.parse_state = ParseState(env_config, 10)
        self.MAX_ENTITIES = 10
        # Map Encoder
        self.map = MapEmbedding(128)
        # Entity Encoder
        self.entity = EntityEmbedding(128, env_config['size'], 1)
        # Scalar Encoder
        self.scalar_encoder = nn.Linear(num_players, 128)
        # transformer
        # self.max_entities = 10
        # Index 0 is the no-op action; indices map raw action samples to moves.
        self.action_map = [None, "NORTH", "EAST", "SOUTH", "WEST", "CONVERT", "SPAWN"]
        self.SHIP_TYPE = 2
        self.SHIPYARD_TYPE = 1
        num_actions = len(self.action_map)
        self.transformer = nn.TransformerEncoder(nn.TransformerEncoderLayer(d_model=128, nhead=8, dim_feedforward=100), 2, norm=None)
        # Per-entity action head.
        self.policy = nn.Sequential(
            nn.Linear(128, 128),
            nn.ReLU(),
            nn.Linear(128, num_actions)
        )
        # State-value head over the flattened entity embeddings.
        self.value = nn.Sequential(
            nn.Linear(128 * self.MAX_ENTITIES, 400),
            nn.ReLU(),
            nn.Linear(400, 100),
            torch.nn.ReLU(),
            nn.Linear(100, 1)
        )
        self.softmax = nn.Softmax(-1)
    def device(self):
        # Device of the module's parameters (assumes all on one device).
        return next(self.parameters()).device
    def forward(self, state, mask = False):
        """Return per-entity action probabilities for `state`; when mask is
        True also return a 0/1 tensor marking which entity slots are real."""
        # Scalar encoding
        state = self.parse_state(state)
        scalar = state['scalar'].to(self.device())
        scalar_encoding = F.relu(self.scalar_encoder(scalar)).unsqueeze(1)
        # Spatial Encoding
        game_map = state['map'].to(self.device())
        map_encoding = self.map(game_map).unsqueeze(1)
        # Entity Encoding
        entity_typ = state['entity_typ'].to(self.device())
        entity_pos = state['entity_pos'].to(self.device())
        entity_scalar = state['entity_scalar'].to(self.device())
        entity_encodings = self.entity(entity_typ, entity_pos, entity_scalar)
        # Broadcast-sum of the three encodings; presumably
        # (batch, MAX_ENTITIES, 128) -- TODO confirm embedding shapes.
        embeddings = map_encoding + entity_encodings + scalar_encoding
        set_embedding = self.transformer(embeddings)
        out = self.policy(set_embedding)
        if mask == True:
            # 1 for real entity slots, 0 for padding up to MAX_ENTITIES.
            lens = []
            for eid in state['entity_id']:
                n_entities = len(eid)
                lens.append(torch.tensor([1] * n_entities + [0] * (self.MAX_ENTITIES - n_entities)))
            m = torch.stack(lens).to(self.device())
            return self.softmax(out), m
        return self.softmax(out)
    def action(self, states):
        """Sample actions for one state or a list of states. Returns per-state
        dicts {entity_id: action_name} plus the raw sampled action tensors."""
        if not isinstance(states, list):
            states = [states]
        t_states = self.parse_state(states)
        out = self.forward(states)
        actions_iter = []
        raw_actions_iter = []
        for i, state in enumerate(states):
            raw_actions = Categorical(probs=out[i]).sample()
            actions = {}
            n_entities = len(t_states['entity_id'][i])
            # TODO: Migrate this code to env helper
            for e, eid in enumerate(t_states['entity_id'][i]):
                act = self.action_map[raw_actions[e]]
                typ = t_states['entity_typ'][i][e]
                # Ships cannot SPAWN; shipyards can only SPAWN or do nothing.
                if typ == self.SHIP_TYPE and act == "SPAWN":
                    act = None
                elif typ == self.SHIPYARD_TYPE and (act != "SPAWN" and act != None):
                    act = None
                elif typ == 0:
                    continue
                # Enforce the entity cap when spawning new ships.
                if act == "SPAWN":
                    if n_entities < self.MAX_ENTITIES:
                        n_entities += 1
                    else:
                        act = None
                if act is not None:
                    actions[eid] = act
            actions_iter.append(actions)
            raw_actions_iter.append(raw_actions)
        return actions_iter, raw_actions_iter
class ParseState(object):
    """Convert raw environment state dicts into batched model tensors.

    For each state this produces: a (1, 2, size, size) spatial map
    (normalized halite + obstruction layers), zero-padded per-entity
    type/position/scalar tensors, the list of entity ids, and per-player
    halite scalars.

    Args:
        config: env config with 'size', 'maxCellHalite', 'startingHalite'.
        max_entities: fixed entity-slot count; shorter entity lists are
            zero-padded up to this length (type 0 marks padding).
    """

    def __init__(self, config, max_entities):
        self.map_size = config['size']
        self.max_halite = config['maxCellHalite']
        self.starting_halite = config['startingHalite']
        self.max_entities = max_entities

    def __call__(self, states):
        # Accept a single state dict or a list of them.
        if not isinstance(states, list):
            states = [states]
        spat_map_iter = []
        entity_typ_iter = []
        entity_pos_iter = []
        entity_id_iter = []
        entity_scalar_iter = []
        scalar_iter = []
        for s in states:
            # NOTE(review): `step` is read but never used below.
            step = s['step']
            # Per-cell halite, normalized by the per-cell maximum.
            halite = torch.tensor(s['halite']).float()
            halite = halite.reshape(self.map_size, self.map_size, 1) / self.max_halite
            # 1.0 wherever any ship or shipyard (ours or an opponent's) sits.
            obstruction = torch.zeros(self.map_size**2).float()
            me = s['players'][s['player']]
            my_halite, my_shipyards, my_ships = tuple(me)
            scalar = torch.zeros(len(s['players']))
            scalar[0] = my_halite
            entity_typ = []
            entity_pos = []
            entity_scalar = []
            entity_id = []
            # Own shipyards: type 1, scalar feature 0.
            for shipyard_id, shipyard_pos in my_shipyards.items():
                obstruction[shipyard_pos] = 1.0
                x = int(shipyard_pos % self.map_size)
                y = int(shipyard_pos / self.map_size)
                entity_typ.append(1)
                entity_pos.append([x,y])
                entity_scalar.append([0])
                entity_id.append(shipyard_id)
            # Own ships: type 2, scalar feature = carried halite.
            # ship_pos is (flat_position, cargo_halite).
            for ship_id, ship_pos in my_ships.items():
                obstruction[ship_pos[0]] = 1.0
                x = int(ship_pos[0] % self.map_size)
                y = int(ship_pos[0] / self.map_size)
                entity_typ.append(2)
                entity_pos.append([x,y])
                entity_scalar.append([ship_pos[1]])
                entity_id.append(ship_id)
            # Opponents contribute only to the obstruction map and the
            # scalar halite totals; their entities are not embedded.
            opponents = s['players']
            scalar_loc = 1
            for i, opponent in enumerate(opponents):
                if i != s['player']:
                    opp_halite, opp_shipyards, opp_ships = tuple(opponent)
                    scalar[scalar_loc] = opp_halite
                    for shipyard_pos in opp_shipyards.values():
                        obstruction[shipyard_pos] = 1.0
                    for ship_pos in opp_ships.values():
                        obstruction[ship_pos[0]] = 1.0
                    scalar_loc += 1
            obstruction = obstruction.reshape(self.map_size, self.map_size, 1)
            # (H, W, C) -> (1, C, H, W) for the CNN encoder.
            spat_map = torch.cat((halite, obstruction), 2).unsqueeze(0).permute(0,3,1,2)
            # Zero-pad the entity axis out to max_entities slots.
            n_entities = len(entity_id)
            diff = self.max_entities - n_entities
            entity_typ = F.pad(torch.tensor(entity_typ).long().unsqueeze(0), (0, diff), "constant", 0)
            entity_pos = F.pad(torch.tensor(entity_pos).long().unsqueeze(0), (0, 0, 0, diff), "constant", 0)
            entity_scalar = F.pad(torch.tensor(entity_scalar).float().unsqueeze(0), (0, 0, 0, diff), "constant", 0)
            # Player halite totals, normalized by the starting bank.
            scalar = scalar.unsqueeze(0) / self.starting_halite
            spat_map_iter.append(spat_map)
            entity_typ_iter.append(entity_typ)
            entity_pos_iter.append(entity_pos)
            entity_id_iter.append(entity_id)
            entity_scalar_iter.append(entity_scalar)
            scalar_iter.append(scalar)
        return {
            'map': torch.cat(spat_map_iter),
            'entity_typ': torch.cat(entity_typ_iter),
            'entity_pos': torch.cat(entity_pos_iter),
            'entity_scalar': torch.cat(entity_scalar_iter),
            'entity_id': entity_id_iter,
            'scalar': torch.cat(scalar_iter)
        }
class MapEmbedding(nn.Module):
    """Encode the stacked 2-D game maps into an embedding via a ResNet."""

    def __init__(self, embed_size=256, depth=2, maps=2):
        super(MapEmbedding, self).__init__()
        # Channel widths double from 64 up to (but excluding) embed_size,
        # then finish at embed_size itself, e.g. embed_size=128 -> [64, 128].
        channel_sizes = []
        width = 64
        while width < embed_size:
            channel_sizes.append(width)
            width = width * 2
        channel_sizes.append(embed_size)
        self.resnet = ResNetEncoder(
            in_channels=maps,
            blocks_sizes=channel_sizes,
            deepths=[depth] * len(channel_sizes),
        )

    def forward(self, multi_layer_map):
        """Run the ResNet encoder over a (batch, maps, H, W) tensor."""
        return self.resnet(multi_layer_map)
class EntityEmbedding(nn.Module):
    """Embed each entity from its type, board position, and scalar features."""

    def __init__(self, d_model, map_size, n_scalars):
        super(EntityEmbedding, self).__init__()
        # self.lut = pre_trained.embeddings.word_embeddings
        # Three type ids: 0 = padding, 1 = shipyard, 2 = ship.
        self.EntityType = nn.Embedding(2 + 1, d_model)
        self.EntityPosition = PositionalEncoding2D(d_model, map_size, map_size)
        self.fc = nn.Linear(n_scalars, d_model)
        self.EntityType.weight.data.uniform_(-0.1, .1)

    def forward(self, typ, pos, scalar):
        """Sum the three per-entity encodings into one d_model vector."""
        type_embedding = self.EntityType(typ)
        position_embedding = self.EntityPosition(pos)
        scalar_embedding = self.fc(scalar)
        return type_embedding + position_embedding + scalar_embedding
# Retrieved from pytorch website
class PositionalEncoding2D(nn.Module):
    """Fixed 2-D sinusoidal positional encoding.

    The first half of the d_model channels encode the x (width) coordinate
    and the second half the y (height) coordinate, each as interleaved
    sin/cos at geometrically spaced frequencies.
    """

    def __init__(self, d_model, height, width):
        super(PositionalEncoding2D, self).__init__()
        if d_model % 4 != 0:
            # BUG FIX: this previously raised the undefined name `Error`
            # (a NameError at raise time) instead of a real exception.
            # Each axis needs an even number of channels for sin/cos pairs.
            raise ValueError("d_model must be divisible by 4, got %d" % d_model)
        pe = torch.zeros(d_model, height, width)
        half = d_model // 2
        div_term = torch.exp(torch.arange(0., half, 2) * -(math.log(10000.0) / half))
        pos_w = torch.arange(0., width).unsqueeze(1)
        pos_h = torch.arange(0., height).unsqueeze(1)
        # Width-dependent channels: constant along the height axis.
        pe[0:half:2, :, :] = torch.sin(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
        pe[1:half:2, :, :] = torch.cos(pos_w * div_term).transpose(0, 1).unsqueeze(1).repeat(1, height, 1)
        # Height-dependent channels: constant along the width axis.
        pe[half::2, :, :] = torch.sin(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
        pe[half + 1::2, :, :] = torch.cos(pos_h * div_term).transpose(0, 1).unsqueeze(2).repeat(1, 1, width)
        # Buffer (not a parameter): moves with .to(device), never trained.
        self.register_buffer('pe', pe)

    def forward(self, pos):
        """Look up encodings for integer board positions.

        Args:
            pos: long tensor of shape (*, 2) holding (x, y) pairs.
        Returns:
            Tensor of shape (*, d_model).
        """
        # (*, 2) -> (2, *) so pos[0]/pos[1] are the x/y index vectors.
        pos = pos.transpose(0, -1)
        return self.pe[:, pos[0], pos[1]].transpose(0, -1)
if __name__ == "__main__":
    # Smoke test: encode two batches of three (x, y) board positions.
    encoder = PositionalEncoding2D(8, 10, 10)
    sample_positions = torch.tensor([[[0, 0], [0, 0], [9, 9]],
                                     [[0, 0], [0, 0], [9, 9]]])
    print(encoder(sample_positions))
|
11,995 | 29ebb74d1378b2aceeb89a17d945538f38964b9a | # Turn Stereo Signals To Mono And Shorten
def mono(source):
    """Collapse a stereo signal to mono by keeping the first channel.

    `source` is a 2-D array of shape (samples, channels); the result is
    the 1-D column for channel 0.
    """
    return source[:, 0]
def cut(source, Fs, seconds):
    """Trim a mono signal to its first `seconds` seconds.

    Args:
        source: 1-D sample sequence.
        Fs: sampling rate in samples per second.
        seconds: desired duration.
    Returns:
        The leading `seconds * Fs` samples of `source`.
    """
    n_samples = seconds * Fs
    return source[:n_samples]
|
11,996 | c1fe0a109a3b28dba6b2e8d8a0affa690e4067ec | from datetime import datetime, timedelta
from django.contrib.admin.views.decorators import staff_member_required
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, JsonResponse
import clients.models as clients
import directory.models as directory
from appconf.manager import SettingManager
from rmis_integration.client import Client
from slog.models import Log as slog
# Log-entry type codes that the cleanup endpoints are allowed to purge
# (order preserved from the original listing).
CLEANUP_TYPES_LOG = (
    1, 2, 3, 4, 5, 6, 10, 16, 17, 18, 19, 20, 25, 27, 22, 23,
    100, 998, 999, 1001,
    2000, 2001, 2002, 2003, 2004, 2005, 2006,
    3000, 3001, 5000, 6000, 10000, 20000, 60001, 60003,
)
@login_required
@staff_member_required
def log(request):
    """Report total log rows, the retention window in days, and how many
    rows a cleanup run would remove."""
    store_days = SettingManager.get("max_log_store_days", "120", "i")
    total = slog.objects.all().count()
    cutoff = datetime.today() - timedelta(days=store_days)
    stale = slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG)
    return JsonResponse({
        "cnt": total,
        "store_days": store_days,
        "to_delete": stale.count(),
    })
@login_required
@staff_member_required
def log_cleanup(request):
    """Delete stale log rows; respond with the number removed as plain text."""
    store_days = SettingManager.get("max_log_store_days", "120", "i")
    cutoff = datetime.today() - timedelta(days=store_days)
    _, per_model = slog.objects.filter(time__lt=cutoff, type__in=CLEANUP_TYPES_LOG).delete()
    removed = per_model.get("slog.Log", 0)
    return HttpResponse(str(removed), content_type="text/plain")
@login_required
@staff_member_required
def db(request):
    """Placeholder endpoint: currently always returns an empty JSON list."""
    return JsonResponse([], safe=False)
@login_required
@staff_member_required
def rmis_check(request):
    """Ping RMIS and echo the organization and department ids, space-separated."""
    client = Client()
    org_id = client.search_organization_id(check=True)
    dep_id = client.search_dep_id(check=True)
    return HttpResponse(org_id + " " + dep_id, content_type="text/plain")
@login_required
@staff_member_required
def archive_without_directions(request):
    """Count archived cards that have no referral directions; delete them
    when called with ?remove=1. Responds with the count as plain text."""
    cards = clients.Card.objects.filter(napravleniya__isnull=True, is_archive=True)
    cnt = cards.count()
    if request.GET.get("remove", "0") == "1":
        _, per_model = cards.delete()
        cnt = per_model.get("clients.Card", 0)
    return HttpResponse(str(cnt), content_type="text/plain")
@login_required
@staff_member_required
def patients_without_cards(request):
    """Count individuals without a patient card; delete them when called
    with ?remove=1. Responds with the count as plain text."""
    individuals = clients.Individual.objects.filter(card__isnull=True)
    cnt = individuals.count()
    if request.GET.get("remove", "0") == "1":
        _, per_model = individuals.delete()
        cnt = per_model.get("clients.Individual", 0)
    return HttpResponse(str(cnt), content_type="text/plain")
@login_required
@staff_member_required
def sync_departments(request):
    """Synchronize departments from RMIS; report added/updated counts.

    sync_departments() returns an (added, updated) pair that fills the
    two placeholders of the response template.
    """
    counts = Client().department.sync_departments()
    return HttpResponse("Добавлено: %s. Обновлено: %s." % counts, content_type="text/plain")
@login_required
@staff_member_required
def sync_researches(request):
    """Backfill Researches.podrazdeleniye from each research's subgroup.

    Responds with the number of affected rows as plain text.
    """
    pending = directory.Researches.objects.filter(podrazdeleniye__isnull=True, subgroup__isnull=False)
    cnt = pending.count()
    for research in pending:
        research.podrazdeleniye = research.subgroup.podrazdeleniye
        research.save()
    return HttpResponse(str(cnt), content_type="text/plain")
|
11,997 | 07a7d586e0410ba707e8e966b3a73d2bef359959 | '''
Created on 09.01.2012
@author: desales
'''
class Node:
    """A singly-linked-list cell holding `cargo` and a `nnext` link."""
    cargo = None
    nnext = None

    def __init__(self, cargo):
        self.cargo = cargo

    def __str__(self):
        return "<Node: %s>" % self.cargo


class LinkedList:
    """Minimal singly linked list addressed by node cargo values."""
    firstNode = None

    #---------------------------------
    # Insert operations
    #---------------------------------
    def insertBeginning(self, cargo):
        """Prepend a new node carrying `cargo`."""
        self._insertBeginning(Node(cargo))

    def _insertBeginning(self, newNode):
        newNode.nnext = self.firstNode
        self.firstNode = newNode

    def insertAfter(self, cargo, newCargo):
        """Insert `newCargo` right after the first node whose cargo equals
        `cargo` (raises AttributeError if `cargo` is absent)."""
        self._insertAfter(self._find(cargo), Node(newCargo))

    def _insertAfter(self, node, newNode):
        newNode.nnext = node.nnext
        node.nnext = newNode

    def find(self, cargo):
        """Redundant, implemented to demonstrate _find"""
        return self._find(cargo).cargo

    def _find(self, cargo):
        # Linear scan; returns the matching Node or None.
        for node in self.traverse():
            if node.cargo == cargo:
                return node
        return None

    def __str__(self):
        res = []
        for node in self.traverse():
            res.append("[%s] -> " % (node.cargo))
        return "".join(res)

    def remove_node_after(self, node):
        # BUG FIX: previously called self.remove_after(self, node.cargo),
        # passing `self` twice and raising TypeError on every call.
        self.remove_after(node.cargo)

    def remove_after(self, cargo):
        """Remove the successor of every node whose cargo equals `cargo`."""
        node = self.firstNode
        while node:
            # Guard the tail: a matching last node has no successor to drop
            # (previously this dereferenced None and crashed).
            if node.cargo == cargo and node.nnext is not None:
                node.nnext = node.nnext.nnext
            node = node.nnext

    def direct_remove(self, cargo):
        """Remove the found node by copying its successor into it.

        NOTE: This problem can not be solved if the node to be deleted
        is the last node in the linked list; in that case the list is
        left unchanged.
        """
        node = self._find(cargo)
        if not node:
            return
        if node.nnext:
            node.cargo = node.nnext.cargo
            node.nnext = node.nnext.nnext
        # else: tail node — cannot be deleted without a predecessor link.

    def remove(self, cargo):
        """Unlink the first node whose cargo equals `cargo`, if any."""
        if not self.firstNode:
            return
        if cargo == self.firstNode.cargo:
            self.firstNode = self.firstNode.nnext
            return
        node = self.firstNode
        while node:
            if node.nnext and cargo == node.nnext.cargo:
                node.nnext = node.nnext.nnext
                return
            node = node.nnext

    def traverse(self):
        """Yield nodes from head to tail."""
        node = self.firstNode
        while node:
            yield node
            node = node.nnext

    def is_empty(self):
        return self.firstNode is None
|
11,998 | 271b1ad8ea535102167c19a1f7ce375322cc8b96 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""Soccer Goal Definition.
TODO:
"""
from rendering.examples.dynamics.particle import Particle
from geometry.vector import Vec2d
from rendering.examples.soccer.dynamics.SteeringBehaviours import SteeringBehaviours
from geometry.point import Point
from rendering.base import Color
class SoccerPlayer(Particle):
    """A field player: a steered particle that shuttles between the ball
    and its starting spot while warming up."""

    def __init__(self, team, colour: Color, number: int, pos: Point, soccer_field):
        # NOTE(review): Particle's positional arguments are assumed to be
        # (pos, radius, velocity, max_velocity, target, mass) — confirm
        # against the Particle definition.
        super().__init__(pos, 15, Vec2d(0, 0), Vec2d(4, 4), Vec2d(soccer_field.playing_area.center), 1)
        self.team = team
        self.colour = colour
        self.number = number
        # Remembered so reset()/warm_up() can send the player back home.
        self.initial_pos = pos
        self.soccer_field = soccer_field
        # Initially face the centre of the playing area.
        a_pt = soccer_field.playing_area.center - pos
        self.direction = Vec2d(x_or_pair=(a_pt.x, a_pt.y)).normalized()
        self.steering_behaviours = SteeringBehaviours(self, soccer_field.ball)
        self.steering_behaviours.activated['arrive'] = True

    def reset(self, pos):  # TODO: this parameter is not used.
        # Always snaps back to the recorded initial position.
        self.pos = self.initial_pos

    def warm_up(self):
        """Runs back and forth between the ball and a random point in the field."""
        self.velocity = self.steering_behaviours.calculate()
        self.pos += self.velocity
        # Snap to integer pixel coordinates for rendering.
        self.pos = Point(int(self.pos.x), int(self.pos.y))
        if not self.is_moving():
            # Arrived: flip the steering target between ball and home spot.
            if self.steering_behaviours.target == self.soccer_field.ball.pos:
                # let's go back towards where I was.
                self.steering_behaviours.target = self.initial_pos
            else:
                # let's go towards the ball.
                self.steering_behaviours.target = self.soccer_field.ball.pos
            self.direction = Vec2d(self.steering_behaviours.target - self.pos).normalized()

    def move(self):
        # Currently the only movement mode is the warm-up shuttle.
        self.warm_up()
|
11,999 | 55f376fe8b9deb59b59313d28ccee244f335dd26 | import unittest
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import os
from os import path
import time
import zipfile
import shutil
import xml.etree.ElementTree as ET
# Target site and local filesystem layout for the generated project.
test_url = "https://start.spring.io/"
download_dir = '/home/student/Downloads/selenium/'
artifact_name = 'my_kotlin'
# Full path of the zip that start.spring.io delivers.
download_file = download_dir + artifact_name + '.zip'
chrome_options = Options()
chrome_options.add_argument("--start-maximized")
# Redirect Chrome downloads into download_dir so the tests can find the zip.
chrome_options.add_experimental_option('prefs', {
    'download.default_directory': download_dir,
})
tmp_dir = '/tmp/'
# Where process_zip_file() extracts the downloaded archive.
extracted_dir = tmp_dir + artifact_name + '/'
class SpringBootWebTestCase(unittest.TestCase):
    """UI tests for start.spring.io: configure a project, download the
    generated zip, and validate the resulting Maven POM."""

    def setUp(self):
        # Start each test from a clean slate: no stale zip, fresh browser.
        if path.exists(download_file):
            os.remove(download_file)
        self.driver = webdriver.Chrome(options=chrome_options)
        self.driver.implicitly_wait(5)
        self.driver.get(test_url)

    def tearDown(self):
        self.driver.quit()

    # --- helpers shared by the option-selection tests -------------------

    def _click_text(self, text, message):
        """Assert an element with visible text `text` exists and click it."""
        els = self.driver.find_elements_by_xpath("//*[text()='%s']" % text)
        self.assertGreater(len(els), 0, message)
        els[0].click()

    def _fill_input(self, element_id, value, message):
        """Assert the input with id `element_id` exists, clear it, type `value`."""
        els = self.driver.find_elements_by_id(element_id)
        self.assertGreater(len(els), 0, message)
        els[0].clear()
        els[0].send_keys(value)

    # --- individual steps ----------------------------------------------

    def test_should_open_website(self):
        """
        Open website and verify that it is load successfully.
        :return:
        """
        els = self.driver.find_elements_by_xpath("//*[text()='Generate']")
        self.assertGreater(len(els), 0, 'Page loads failed!')

    def test_should_choose_maven(self):
        """
        Verify can select Maven option
        :return:
        """
        self._click_text('Maven Project', 'Maven project is not found!')

    def test_should_choose_kotlin(self):
        self._click_text('Kotlin', 'Kotlin is not found!')

    def test_should_choose_spring_boot_version(self):
        self._click_text('2.2.8', 'spring-boot version is not found!')

    def test_should_enter_group_name(self):
        self._fill_input('input-group', 'com.drkiettran', 'input-group is not found!')

    def test_should_enter_artifact_name(self):
        self._fill_input('input-artifact', artifact_name, 'input-artifact is not found!')

    def test_should_choose_packaging_type(self):
        self._click_text('War', 'Packaging type War is not found!')

    def test_should_choose_jdk_version(self):
        self._click_text('11', 'JDK version 11 is not found!')

    def test_should_select_generate_button(self):
        self._click_text('Generate', 'Generate button is not found!')

    # --- end-to-end scenarios ------------------------------------------

    def test_should_receive_a_zip_file(self):
        """Drive the full form and assert the zip actually downloads."""
        self.test_should_choose_maven()
        self.test_should_choose_kotlin()
        self.test_should_choose_spring_boot_version()
        self.test_should_enter_group_name()
        self.test_should_enter_artifact_name()
        self.test_should_choose_packaging_type()
        self.test_should_choose_jdk_version()
        self.test_should_select_generate_button()
        self.assertTrue(self.wait_for_zip_file(), 'zip file not found!')

    def test_should_download_correct_spring_boot_project(self):
        self.test_should_receive_a_zip_file()
        self.process_zip_file()
        self.assertTrue(self.it_is_a_maven_project())

    # --- download / archive handling -----------------------------------

    def it_takes_too_long(self, count):
        # Give the download at most ~5 seconds.
        return count > 5

    def wait_for_zip_file(self):
        """Poll for the downloaded zip until it appears or the timeout hits.

        BUG FIX: this previously asserted that the *download directory*
        exists and then unconditionally returned True, so the caller's
        assertTrue(...) could never fail. It now reports whether the zip
        file itself showed up.
        """
        count = 0
        while not path.exists(download_file) and not self.it_takes_too_long(count):
            time.sleep(1)
            count += 1
        return path.exists(download_file)

    def process_zip_file(self):
        """Extract the downloaded zip into /tmp, replacing any old copy."""
        if path.exists(extracted_dir):
            shutil.rmtree(extracted_dir)
        with zipfile.ZipFile(download_file, 'r') as zip_ref:
            zip_ref.extractall(tmp_dir)

    def it_is_a_maven_project(self):
        """Parse the extracted pom.xml and check the Spring Boot version."""
        pom_file = extracted_dir + 'pom.xml'
        tree = ET.parse(pom_file)
        root = tree.getroot()
        self.assertEqual('2.2.8.RELEASE', self.get_spring_boot_version(root))
        return True

    def get_spring_boot_version(self, root):
        """Return the <version> text under the POM's <parent> element, or
        None if the structure is absent."""
        for child in root:
            if 'parent' in child.tag:
                for gchild in child:
                    if 'version' in gchild.tag:
                        return gchild.text
        return None
# Allow running this suite directly with `python <file>.py`.
if __name__ == '__main__':
    unittest.main()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.