index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
19,300 | b6447589faac6e459784eed04381ef6010947051 | #Import required packages and modules
import streamlit as st
from keras.models import model_from_json
from PIL import Image
import pandas as pd
import SessionState
import skimage
import skimage.io
import skimage.transform
import numpy as np
#Load Keras CNN
file = open("model_config.json", 'r')
model_json = file.read()
file.close()
model = model_from_json(model_json)
model.load_weights("model_weights.h5")
#Image preparation for predictions
def read_image():
    """Load the currently selected test image and prepare it for the CNN.

    Reads the file at the module-level ``state.path`` (set by the
    "Load a pic" button), resizes it to the 100x100 input size the model
    expects, and keeps only the first 3 channels to drop any alpha channel.
    """
    image = skimage.io.imread(state.path)
    image = skimage.transform.resize(image, (100, 100))
    return image[:,:,:3]
#Load dataframe
df = pd.read_csv('test_df_strat.csv', index_col=0)
#Load and set icon
Logo = Image.open("photos/Logo_full_Red_white_letters.png")
st.set_page_config(page_title="Bee classifier",
page_icon=None, layout='centered',
initial_sidebar_state='auto')
red_line = Image.open("photos/Red_line.png")
#Custom header
st.image(red_line)
col1, col2 = st.beta_columns((3, 1))
col1.markdown(
"<h1 style='color: #CE1B28;'>Environmental Data Science Playground</h1>",
unsafe_allow_html=True,
)
col2.image(Logo, width=100)
st.image(red_line)
#Set title
st.title('Bee classifier')
#Descriptive text
intro = """With this little app I want to demonstrate the potential of
convolutional neural networks (CNNs) for image classification. It makes use of
[Kaggle's BeeImage Dataset](https://www.kaggle.com/jenny18/honey-bee-annotated-images).\n
This dataset includes labeled low-quality images of four bee subspecies:"""
st.write(intro)
#Load bee pics of the four species
col1, col2, col3, col4 = st.beta_columns(4)
pic1 = Image.open("photos/Carniolan_honey_bee.jpg")
col1.markdown("**Carniolan honey bee**")
col1.image(pic1, use_column_width=True)
pic2 = Image.open("photos/Italian_honey_bee.jpg")
col2.markdown("**Italian honey bee**")
col2.image(pic2, use_column_width=True)
pic3 = Image.open("photos/Russian_honey_bee.jpg")
col3.markdown("**Russian honey bee**")
col3.image(pic3, use_column_width=True)
pic4 = Image.open("photos/Western_honey_bee.jpg")
col4.markdown("**Western honey bee**")
col4.image(pic4, use_column_width=True)
#Descriptive text
intro2 = """The CNN (more info about its architecture and hyperparameters can
be found on [my homepage](https://zubrod-eds.de/en/2021/04/11/bienen-klassifizieren-mit-cnns/)) had an accuracy higher than
99%, meaning that from the more than 850 bee pictures that were set aside for testing
the CNN only 8 were classified wrong.\n
Below you can test if you can compete with the predictive power of the CNN.
Try to guess from the pictures above which subspecies the bee on the image
belongs to and check the respective box. You can repeat this as often as you wish
by clicking the button below. Good luck 😊"""
st.write(intro2)
#New image button
state = SessionState.get(img=None, path=None, label=None, choice=None)
col1, col2, col3, col4, col5 = st.beta_columns(5)
if col3.button("Load a pic"):
test_pic = df.sample(1)
test_pic_file_name = list(test_pic.file)
state.label = test_pic.subspecies.to_string(index = False)
state.path = "test_images/"+test_pic_file_name[0]
state.img = Image.open(state.path)
try:
col1, col2, col3, col4, col5 = st.beta_columns(5)
col3.image(state.img, width = 100)
except:
pass
#Choice button
subspecies_options = ["Carniolan honey bee", "Italian honey bee",
"Russian honey bee", "Western honey bee"]
col1, col2, col3 = st.beta_columns(3)
radio = col2.radio("Choose a subspecies:", subspecies_options)
if radio:
state.choice = radio
#Submission and prediction
col1, col2, col3, col4, col5 = st.beta_columns(5)
check = col3.button("Submit")
all_subspecies = ['Carniolan honey bee', 'Italian honey bee', 'Russian honey bee',
'VSH Italian honey bee', 'Western honey bee']
#Submission and prediction: show truth, model prediction, and the user's guess
if check:
    try:
        # model.predict returns shape (1, n_classes); argmax over axis 1
        # yields a length-1 array and .item() extracts the Python int.
        # (np.asscalar was deprecated and removed in NumPy 1.23.)
        prediction = np.argmax(model.predict(np.expand_dims(read_image(),
                                                            axis=0)),
                               axis=1).item()
        col1, col2, col3, col4, col5 = st.beta_columns((1,2,2,2,1))
        col2.write("True subspecies:")
        col3.write(state.label)
        col1, col2, col3, col4, col5 = st.beta_columns((1,2,2,2,1))
        col2.write("Model prediction:")
        col3.write(all_subspecies[prediction])
        if state.label == all_subspecies[prediction]:
            col4.markdown(":white_check_mark:")
        else:
            col4.markdown(":x:")
        col1, col2, col3, col4, col5 = st.beta_columns((1,2,2,2,1))
        col2.write("Your guess:")
        # "VSH Italian honey bee" is accepted as a correct "Italian" guess
        col3.write(state.choice)
        if state.label == state.choice or (state.label == "VSH Italian honey bee"
                                           and state.choice == "Italian honey bee"):
            col4.markdown(":white_check_mark:")
        else:
            col4.markdown(":heavy_exclamation_mark:")
    except Exception:
        # no image loaded yet (state.path is None) — skip rendering quietly
        pass
st.markdown('##')
st.image(red_line)
st.write("Created by Jochen Zubrod as part of the [Environmental Data Science \
Playground](https://zubrod-eds.de/en/playground/)")
st.image(red_line)
st.write("Picture sources")
col1, col2, col3, col4 = st.beta_columns(4)
col1.write("[Carniolan honey bee](https://upload.wikimedia.org/wikipedia/commons/1/14/Carnica_bee_on_Hylotelephium_%27Herbstfreude%27.jpg)")
col2.write("[Italian honey bee](https://upload.wikimedia.org/wikipedia/commons/thumb/c/cd/Honeybee-27527-1.jpg/1280px-Honeybee-27527-1.jpg)")
col3.write("[Russian honey bee](https://upload.wikimedia.org/wikipedia/commons/thumb/3/3f/Drinking_Bee2.jpg/1280px-Drinking_Bee2.jpg)")
col4.write("[Western honey bee](https://upload.wikimedia.org/wikipedia/commons/thumb/4/4d/Apis_mellifera_Western_honey_bee.jpg/1024px-Apis_mellifera_Western_honey_bee.jpg)")
|
19,301 | 8a522cfa43cbc8b4967ba396c5eb8ed4695de676 | import re
# A beta number larger than any real one; appended to non-beta versions so
# ordinary releases always sort after their own betas.
MAX_BETA_STR = f"{0xffffffff}.{0xffff}.{0xffff}"


class OPCLIVersion:
    """Comparable representation of a CLI version string.

    Understands dotted versions such as "2.18.0" as well as beta suffixes
    such as "2.18.0-beta.01". A beta sorts before the matching release.

    Fixes over the original: adds __gt__/__ge__ (previously ``v > "2.0"``
    raised TypeError because only __lt__/__le__ existed), and strips the
    beta suffix by slicing instead of re.sub(), which treated the suffix
    (containing ".") as a regular expression.
    """

    def __init__(self, version: str, skip_beta=False):
        # allow a bare int (e.g. a beta number) for convenience
        if isinstance(version, int):
            version = str(version)
        version, beta_num = self._parse_beta(version)
        version_tuple = version.split(".")
        if not skip_beta:
            self._beta_num = beta_num
        else:
            # -1 marks "beta handling disabled" (helper instances only)
            self._beta_num = -1
        # base 0 lets prefixed components (0x..., 0o...) parse as well
        self._parts = [int(part, 0) for part in version_tuple]

    @property
    def beta_ver(self):
        # if this is not a beta, set beta_ver to an absurdly high value
        # so it beats any actual beta version;
        # this keeps the lt/gt/eq comparison logic simple
        beta_ver = None
        if self._beta_num is None:
            beta_ver = OPCLIVersion(MAX_BETA_STR, skip_beta=True)
        elif self._beta_num == -1:
            pass
        else:
            beta_ver = OPCLIVersion(self._beta_num, skip_beta=True)
        return beta_ver

    @property
    def is_beta(self) -> bool:
        """True if this version carried a ``-beta.NN`` suffix."""
        return self._beta_num is not None and self._beta_num >= 0

    def _parse_beta(self, version_string):
        """Split "2.18.0-beta.01" into ("2.18.0", 1); beta_num is None if absent."""
        regex = r".*(-beta.*)$"
        beta_num = None
        match = re.match(regex, version_string)
        if match:
            # e.g. "-beta.01"
            beta_string = match.groups()[0]
            # strip the suffix from the end, leaving "2.18.0"; slicing avoids
            # re.sub() misinterpreting "." in beta_string as a regex wildcard
            version_string = version_string[: -len(beta_string)]
            # extract "01" from "-beta.01" and convert to an int
            beta_num = int(beta_string.split(".")[1])
        return version_string, beta_num

    def _normalize(self, other):
        """Return (parts_self, parts_other) padded to equal length.

        Is robust about version strings of differing lengths,
        e.g. 11.3.1 vs 11.3.1.1: the shorter list is zero-padded.
        The beta version (as a nested OPCLIVersion) is appended as one more
        comparable part, so the lt/gt/eq logic needs no special casing.
        """
        parts_self = list(self._parts)
        parts_other = list(other._parts)
        diff_len = len(parts_self) - len(parts_other)
        # negative difference: other is longer, extend parts_self
        while diff_len < 0:
            parts_self.append(0)
            diff_len += 1
        # positive difference: extend parts_other
        while diff_len > 0:
            parts_other.append(0)
            diff_len -= 1
        beta_ver = self.beta_ver
        if beta_ver is not None:
            parts_self.append(beta_ver)
        beta_ver = other.beta_ver
        if beta_ver is not None:
            parts_other.append(beta_ver)
        # now 11.3.1 vs 11.3.1.1 becomes 11.3.1.0 vs 11.3.1.1
        return (parts_self, parts_other)

    def __str__(self):
        beta_part = None
        if self.is_beta:
            beta_part = f"-beta.{self._beta_num:02d}"
        _str = ".".join([str(i) for i in self._parts])
        if beta_part:
            _str += beta_part
        return _str

    def __eq__(self, other):
        equal = id(self) == id(other)
        if not equal:
            if not isinstance(other, OPCLIVersion):
                other = OPCLIVersion(other)
            parts_self, parts_other = self._normalize(other)
            equal = True
            for i, part in enumerate(parts_self):
                if part != parts_other[i]:
                    equal = False
                    break
        return equal

    def __ne__(self, other):
        return not self.__eq__(other)

    def __lt__(self, other):
        if id(self) == id(other):
            return False
        if not isinstance(other, type(self)):
            other = OPCLIVersion(other)
        lt = False
        _parts_self, _parts_other = self._normalize(other)
        for i, part in enumerate(_parts_self):
            if part > _parts_other[i]:
                break
            elif part == _parts_other[i]:
                continue
            elif part < _parts_other[i]:
                lt = True
                break
        return lt

    def __le__(self, other):
        return self.__lt__(other) or self.__eq__(other)

    def __gt__(self, other):
        # previously missing: relied on the (absent) reflected operator,
        # so comparisons against plain strings raised TypeError
        return not self.__lt__(other) and not self.__eq__(other)

    def __ge__(self, other):
        return not self.__lt__(other)
# Version thresholds used by the package's capability checks. Semantics are
# assumed from the names — TODO confirm against the CLI changelog.
MINIMUM_SERVICE_ACCOUNT_VERSION = OPCLIVersion('2.18.0-beta.01')
DOCUMENT_BYTES_BUG_VERSION = OPCLIVersion('2.2.0')
|
19,302 | d6d1ed4f5effe3095ba8d623d9ad351c05680daf | from rest_framework import serializers
from django.contrib.auth import get_user_model
from .models import Task
User = get_user_model()
class UserSerializer(serializers.ModelSerializer):
    """Minimal user representation (id and username only), nested on tasks."""
    class Meta:
        model = User
        fields = ['id', 'username', ]
class TaskSerializer(serializers.ModelSerializer):
    """Serializer for Task objects.

    ``owner`` is a read-only nested user; ``is_finished`` cannot be set by
    clients. ``estimated_finish_time`` is declared explicitly, overriding the
    model-derived field — presumably to make it required; confirm intent.
    """
    estimated_finish_time = serializers.DateTimeField()
    owner = UserSerializer(read_only=True)
    class Meta:
        model = Task
        fields = ['id', 'owner', 'body', 'estimated_finish_time', 'is_finished', ]
        read_only_fields = ['is_finished', ]
|
19,303 | a49678dabbe727d8e3a11fdffceb5f0dd83d5a08 | from mtcnn.mtcnn import MTCNN
import cv2
import argparse
import os, sys
def get_args(argv=None):
    """Parse command-line arguments.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``
            (passing a list makes the parser unit-testable).

    Returns:
        argparse.Namespace with ``read_path`` and ``name``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--read_path', type=str, default='', help='path to image file or directory to images')
    # the original help text here was copy-pasted from --read_path
    parser.add_argument('--name', type=str, default='', help='name used for the cropped face images')
    return parser.parse_args(argv)
def extract_face(filename, required_size=(160, 160), detector=None):
    """Detect the first face in an image file and return its cropped pixels.

    Args:
        filename: path to the image file.
        required_size: (width, height) the crop is resized to.
        detector: optional pre-built MTCNN instance. Constructing MTCNN per
            image is expensive, so callers cropping many files should pass
            one in; when None a fresh detector is created (old behavior).

    Returns:
        (results, face_array) where results is the raw detector output, or
        (None, None) when no face is found.
    """
    # load image from file
    print(filename)
    image = cv2.imread(filename)
    if detector is None:
        detector = MTCNN()
    results = detector.detect_faces(image)
    if not results:
        return None, None
    x1, y1, width, height = results[0]['box']
    # bug fix: MTCNN can return slightly negative box coordinates
    x1, y1 = abs(x1), abs(y1)
    x2, y2 = x1 + width, y1 + height
    # extract the face
    face = image[y1:y2, x1:x2]
    # resize pixels to the model size
    face_array = cv2.resize(face, required_size)
    return results, face_array
def load_faces(directory):
    """Extract a face crop from every image file directly under *directory*.

    Returns:
        (results, faces) where results holds the detector output per image
        and faces holds [filename, cropped_face] pairs.
    """
    faces = list()
    results = list()
    # enumerate files
    for filename in os.listdir(directory):
        # os.path.join is robust to a missing trailing separator
        # (the original string concatenation was not)
        path = os.path.join(directory, filename)
        # skip sub-directories (e.g. the crop/ output folder)
        if os.path.isdir(path):
            continue
        # get face; result is None when no face was detected
        result, face = extract_face(path)
        if result is None:
            continue
        # store
        results.append(result)
        faces.append([filename, face])
    return results, faces
def save_face_image(name, save_path, faces):
    """Write each cropped face to save_path under its original filename.

    Args:
        name: currently unused — kept for interface compatibility with callers.
        save_path: output directory.
        faces: list of [filename, face_image] pairs from load_faces().
    """
    # the original enumerate() index was never used
    for original_name, face_img in faces:
        path = os.path.join(save_path, original_name)
        print(path)
        cv2.imwrite(path, face_img)
if __name__ == '__main__':
    args = get_args()
    read_path = args.read_path
    name = args.name
    if not os.path.exists(read_path):
        # message is Korean for "The path does not exist."
        print("존재하지 않는 경로입니다.")
        sys.exit(-1)
    else:
        # normalize read_path to a trailing slash and derive the crop/ output dir
        if read_path[-1] == '/':
            save_path = read_path+"crop/"
        else:
            read_path += '/'
            save_path = read_path+"crop/"
        if not os.path.exists(save_path):
            os.mkdir(save_path)
        # crop all faces found under read_path and write them into crop/
        _, faces = load_faces(read_path)
        save_face_image(name=name, save_path=save_path, faces = faces)
|
19,304 | a9fea29c70a4825e50e3949e4940bdc7c3f07e6d | # rf_grid_search.py
import pandas as pd
import numpy as np
from sklearn import ensemble
from sklearn import metrics
from sklearn import model_selection
if __name__ == "__main__":
# read the training data
df = pd.read_csv('dataset/train.csv')
# features are all columns without price_range
# Note that there is no Id column in this dataset
# here we have training features
x = df.drop('price_range' , axis=1).values
# and the targets
y = df.price_range.values
# define the model here
classifier = ensemble.RandomForestClassifier(n_jobs = -1)
# define a grid of parameters this can be a dictionary or a list of dictionary
#
params_grid = {
'n_estimators' : [100, 200, 250, 300, 400, 500],
'max_depth': [1,2,5,7,11,15],
'criterion': ['gini' , 'entropy'],
}
# initialize grid search estimator is the model that we have defined
# param_grid is the grid of parameters we use accuracy as our metric
# higher value of verbose implies a lot of details are printed
# cv= 5 means we are using 5 folds cv
model = model_selection.GridSearchCV(
estimator= classifier,
param_grid= params_grid,
scoring='accuracy',
verbose=10,
n_jobs=1,
cv=5
)
# fit the model and extract best score
model.fit(x,y)
print(f"Best score : {model.best_score_}")
print("Best parameters set:")
best_parameters = model.best_estimator_.get_params()
for param_name in sorted(params_grid.keys()):
print(f"\t{param_name} : {best_parameters[param_name]}") |
19,305 | b6711cebc8e1bf9c0af904941b56806c33f836d0 | from django.contrib import admin
from .models import Message
class MessageAdmin(admin.ModelAdmin):
    """Admin registration for Message using default ModelAdmin behavior."""
    pass
admin.site.register(Message, MessageAdmin)
19,306 | 3f3be192651ae5345fe41b48f121840d0e53522a | contador = 0
numero = 0
maior = 0
menor = 0
while(contador < 4):
numero = int(input("Informe um número: "))
if contador == 0:
maior = numero
menor = numero
if numero < menor:
menor = numero
if numero > maior:
maior = numero
contador += 1
else:
print("--------------------------")
print("O maior número é: ", maior)
print("O menor número é: ", menor)
|
19,307 | 605d3d176e4732255d3aa7ebf9999ef73ddf90dc | from functools import partial
import jsonlines
from gensim.models import KeyedVectors
import os
dirname = os.path.dirname(__file__)
join_path = partial(os.path.join, dirname)
with jsonlines.open(join_path('data/cambridge.sense.000.jsonl')) as f:
senses = list(f)
def_embeds = KeyedVectors.load_word2vec_format(
join_path('data/cambridge.sense.000.sg.def_embeds.txt'), binary=False)
|
19,308 | 763a3ad028134b2e9f92d5ad6d42789988e75b73 | """
Test ACL Transaction Source Code Examples
"""
import pytest
from ethereum_test_forks import Fork, London, is_fork
from ethereum_test_tools import AccessList, Account, Environment
from ethereum_test_tools import Opcodes as Op
from ethereum_test_tools import StateTestFiller, Transaction
REFERENCE_SPEC_GIT_PATH = "EIPS/eip-2930.md"
REFERENCE_SPEC_VERSION = "c9db53a936c5c9cbe2db32ba0d1b86c4c6e73534"
@pytest.mark.valid_from("Berlin")
@pytest.mark.valid_until("London")
def test_access_list(state_test: StateTestFiller, fork: Fork):
"""
Test type 1 transaction.
"""
env = Environment()
pre = {
"0x000000000000000000000000000000000000aaaa": Account(
balance=0x03,
code=Op.PC + Op.SLOAD + Op.POP + Op.PC + Op.SLOAD,
nonce=1,
),
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(
balance=0x300000,
nonce=0,
),
}
tx = Transaction(
ty=1,
chain_id=0x01,
nonce=0,
to="0x000000000000000000000000000000000000aaaa",
value=1,
gas_limit=323328,
gas_price=7,
access_list=[
AccessList(
address="0x0000000000000000000000000000000000000000",
storage_keys=[
"0x0000000000000000000000000000000000000000000000000000000000000000",
],
)
],
secret_key="0x45a915e4d060149eb4365960e6a7a45f334393093061116b197e3240065ff2d8",
protected=True,
)
post = {
"0x000000000000000000000000000000000000aaaa": Account(
code="0x5854505854",
balance=4,
nonce=1,
),
"0x2adc25665018aa1fe0e6bc666dac8fc2697ff9ba": Account(
balance=0x1BC16D674EC80000 if is_fork(fork, London) else 0x1BC16D674ECB26CE,
),
"0xa94f5374fce5edbc8e2a8697c15331677e6ebf0b": Account(
balance=0x2CD931,
nonce=1,
),
}
state_test(env=env, pre=pre, post=post, txs=[tx])
|
19,309 | 0f7d2bf0b13f50eff13a8b0bd1663a76d1966b56 | """
Simulation results data encapsulation, for PyBERT.
Original Author: David Banas <capn.freako@gmail.com>
Original Date: 9 May 2017
This Python script provides a data structure for encapsulating the
simulation results data of a PyBERT instance. It was first
created, as a way to facilitate easier pickling, so that a particular
result could be saved and later restored, as a reference waveform.
Copyright (c) 2017 by David Banas; All rights reserved World wide.
"""
from chaco.api import ArrayPlotData
class PyBertData:
"""
PyBERT simulation results data encapsulation class.
This class is used to encapsulate that subset of the results
data for a PyBERT instance, which is to be saved when the user
clicks the "Save Results" button.
"""
_item_names = [
"chnl_h",
"tx_out_h",
"ctle_out_h",
"dfe_out_h",
"chnl_s",
"tx_s",
"ctle_s",
"dfe_s",
"tx_out_s",
"ctle_out_s",
"dfe_out_s",
"chnl_p",
"tx_out_p",
"ctle_out_p",
"dfe_out_p",
"chnl_H",
"tx_H",
"ctle_H",
"dfe_H",
"tx_out_H",
"ctle_out_H",
"dfe_out_H",
"tx_out",
"rx_in",
]
def __init__(self, the_PyBERT):
"""
Copy just that subset of the supplied PyBERT instance's
'plotdata' attribute, which should be saved during pickling.
"""
plotdata = the_PyBERT.plotdata
the_data = ArrayPlotData()
for item_name in self._item_names:
the_data.set_data(item_name, plotdata.get_data(item_name))
self.the_data = the_data
|
19,310 | dce66d82071dc2eff24ba0fc8e9a9e9a8da07161 | import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import numpy as np
import cv2
import glob
import time
from sklearn.svm import SVC
from sklearn.preprocessing import StandardScaler
from skimage.feature import hog
from utils import extract_features
from parameters import *
from sklearn.utils import shuffle
import pickle
# Read in cars and notcars
cars = glob.glob('data/vehicles/*/*.png')
cars_test = glob.glob('data/vehicles_test/*.png')
notcars = glob.glob('data/non-vehicles/*/*.png')
notcars_test = glob.glob('data/non-vehicles_test/*.png')
def data_look(car_list, car_test_list, notcar_list, notcar_test_list):
    """Summarize the dataset: image counts, example image shape and dtype."""
    data_dict = {}
    # Define a key in data_dict "n_cars" and store the number of car images
    data_dict["n_cars"] = len(car_list) + len(car_test_list)
    # Define a key "n_notcars" and store the number of notcar images
    data_dict["n_notcars"] = len(notcar_list) + len(notcar_test_list)
    # Read in a test image (the first car) — all images are assumed to share
    # shape and dtype; TODO confirm for mixed datasets
    example_img = mpimg.imread(car_list[0])
    data_dict["image_shape"] = example_img.shape
    # Define a key "data_type" and store the data type of the test image.
    data_dict["data_type"] = example_img.dtype
    # Return data_dict
    return data_dict
def data_extract_features(data):
    """Extract image features for a list of file paths using the module-level
    tuning constants (COLOR_SPACE, ORIENT, ... imported from parameters)."""
    return extract_features(data, color_space=COLOR_SPACE,
                            spatial_size=SPATIAL_SIZE, hist_bins=HIST_BINS,
                            orient=ORIENT, pix_per_cell=PIX_PER_CELL,
                            cell_per_block=CELL_PER_BLOCK,
                            hog_channel=HOG_CHANNEL, spatial_feat=SPATIAL_FEAT,
                            hist_feat=HIST_FEAT, hog_feat=HOG_FEAT)
def split_data():
    """Build scaled train/test feature matrices from the module-level file lists.

    Returns:
        (X_train, X_test, y_train, y_test, X_scaler) where the scaler was fit
        on the training features only.
    """
    car_features = data_extract_features(cars)
    car_test_features = data_extract_features(cars_test)
    notcar_features = data_extract_features(notcars)
    notcar_test_features = data_extract_features(notcars_test)
    X = np.vstack((car_features, notcar_features)).astype(np.float64)
    # Fit a per-column scaler on the training data only
    X_scaler = StandardScaler().fit(X)
    # Apply the scaler to both splits
    X_train = X_scaler.transform(X)
    X_test = X_scaler.transform(
        np.vstack((car_test_features, notcar_test_features)).astype(np.float64))
    # Labels: 1 = car, 0 = not car
    y_train = np.hstack((np.ones(len(car_features)),
                         np.zeros(len(notcar_features))))
    y_test = np.hstack((np.ones(len(car_test_features)),
                        np.zeros(len(notcar_test_features))))
    # One shuffle suffices — the original ran a second pass with a fresh
    # random state "to make sure", which was redundant work.
    rand_state = np.random.randint(0, 100)
    X_train, y_train = shuffle(X_train, y_train, random_state=rand_state)
    X_test, y_test = shuffle(X_test, y_test, random_state=rand_state)
    return X_train, X_test, y_train, y_test, X_scaler
def load_data(regenerate=False):
    """Load cached features from _data.p, or rebuild and cache when regenerate=True.

    Returns:
        dict with keys X_train, X_test, y_train, y_test, X_scaler.
    """
    if regenerate:
        X_train, X_test, y_train, y_test, X_scaler = split_data()
        dict_data = {
            "X_train": X_train,
            "X_test": X_test,
            "y_train": y_train,
            "y_test": y_test,
            "X_scaler": X_scaler,
        }
        # context manager closes the file; the original passed open(...)
        # straight to pickle.dump and leaked the handle
        with open('_data.p', 'wb') as f:
            pickle.dump(dict_data, f)
    else:
        with open('_data.p', 'rb') as f:
            dict_data = pickle.load(f)
    return dict_data
def train_model(dict_data):
    """Train a linear SVC on the prepared features, pickle it, and report accuracy.

    Args:
        dict_data: dict from load_data() with X_train/y_train/X_test/y_test/X_scaler.
    """
    model = SVC(kernel="linear", C=0.1)
    t = time.time()
    model.fit(dict_data['X_train'], dict_data['y_train'])
    t2 = time.time()
    print(round(t2 - t, 2), 'Seconds to train model...')
    # save model together with the scaler needed to reproduce its inputs
    dist_pickle = {
        'model': model,
        'X_scaler': dict_data["X_scaler"]
    }
    # context manager closes the file; the original open(...) inside
    # pickle.dump leaked the handle
    with open('model_linear.p', 'wb') as f:
        pickle.dump(dist_pickle, f)
    print('Test Accuracy of model = ', round(
        dist_pickle["model"].score(dict_data['X_test'], dict_data['y_test']), 4))
def main():
    """Print dataset stats and train the linear SVC model."""
    # PRINT SOME INFO
    data_info = data_look(cars, cars_test, notcars, notcars_test)
    # NOTE(review): load_data() defaults to regenerate=False and will fail if
    # the cached _data.p does not exist yet — confirm the intended workflow.
    dict_data = load_data()
    print(data_info["n_cars"], ' cars and',
          data_info["n_notcars"], ' non-cars')
    print('of size: ', data_info["image_shape"],
          ' and data type:', data_info["data_type"])
    print('Using:', ORIENT, 'orientations', PIX_PER_CELL,
          'pixels per cell and', CELL_PER_BLOCK, 'cells per block')
    print('Feature vector length:', len(dict_data['X_train'][0]))
    train_model(dict_data)
if __name__ == "__main__":
main()
|
19,311 | e9e6429fa5ee547009efa652f8a9c774b9aadd83 | import sys
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
class KNNBase:
    """Shared storage/fit logic for the KNN estimators."""

    def __init__(self, k):
        self.k = k

    def fit(self, x, y):
        # lazy learner: just memorize the training data
        self.x = x
        self.y = y

    def __str__(self):
        return "KNN Classifier"


class KNNClassifier(KNNBase):
    """k-nearest-neighbor majority-vote classifier."""

    def __str__(self):
        return "KNNClassifier({!s})".format(self.k)

    def predict(self, data):
        """Predict the label of one sample by majority vote of the k nearest."""
        size = self.x.shape[0]
        diff_matrix = np.tile(data, (size, 1)) - self.x
        sq_matrix = diff_matrix ** 2
        sum_matrix = sq_matrix.sum(axis=1)
        distances = sum_matrix ** 0.5
        sorted_indices = distances.argsort()
        class_count = {}
        for i in range(self.k):
            vote = self.y[sorted_indices[i]]
            class_count[vote] = class_count.get(vote, 0) + 1
        # BUG FIX: the original sorted(class_count.items(), reverse=True)
        # ordered by *label*, not vote count, so the numerically largest
        # label always won. Pick the label with the most votes instead.
        return max(class_count.items(), key=lambda kv: kv[1])[0]


class KNNRegressor(KNNBase):
    """k-nearest-neighbor mean regressor."""

    def __str__(self):
        return "KNNRegressor({!s})".format(self.k)

    def predict(self, data):
        """Predict a value as the mean of the k nearest training targets."""
        size = self.x.shape[0]
        diff_matrix = np.tile(data, (size, 1)) - self.x
        sq_matrix = diff_matrix ** 2
        sum_matrix = sq_matrix.sum(axis=1)
        distances = sum_matrix ** 0.5
        sorted_indices = distances.argsort()
        total_sum = 0
        for i in range(self.k):
            total_sum += self.y[sorted_indices[i]]
        return total_sum / self.k
def demo(k, dir="", row=5000):
    """Quick validation run of KNNClassifier on the Kaggle digit-recognizer set.

    Args:
        k: number of neighbors.
        dir: directory containing train.csv.
        row: number of rows of the CSV to use.
    """
    try:
        data = pd.read_csv(dir + "train.csv")
        print("Data loaded successfully.")
    except OSError:
        # The original bare except printed the hint and then fell through,
        # crashing with NameError on the undefined `data`; bail out instead.
        print("Can not find train.csv in the directory specified.")
        print("To download, please visit https://www.kaggle.com/c/digit-recognizer/data.")
        return
    # column 0 is the label, the rest are pixel features
    x = data.values[0:row, 1:]
    y = data.values[0:row, 0]
    train_x, test_x, train_y, test_y = train_test_split(x, y, test_size=0.2, random_state=0)
    print("Data splitted for validation.")
    classifier = KNNClassifier(k)
    classifier.fit(train_x, train_y)
    size = test_x.shape[0]
    predictions = []
    for i in range(size):
        predictions.append(classifier.predict(test_x[i]))
    print("Prediction completes.")
    print("Validation score: {}".format(accuracy_score(test_y, predictions)))
    print("To learn more about K-Nearest Neighbor Algorithm: https://en.wikipedia.org/wiki/K-nearest_neighbors_algorithm")
19,312 | 523c27a48db20264159fd2c6e6f12fce2020cb38 | import random
import numpy as np
from functools import lru_cache
basic_style = {
0: '0',
1: '1',
2: '2',
3: '3',
4: '4',
5: '5',
6: '6',
7: '7',
8: '8',
9: ' ',
'o': 'o',
'x': 'x',
}, lambda x, s: s
default_style = basic_style
try:
from sty import fg, bg
sty_style = {
0: bg.white + ' ' + bg.rs,
1: bg.white + fg.blue + '1' + fg.rs + bg.rs,
2: bg.white + fg.green + '2' + fg.rs + bg.rs,
3: bg.white + fg.red + '3' + fg.rs + bg.rs,
4: bg.white + fg.da_blue + '4' + fg.rs + bg.rs,
5: bg.white + fg.da_red + '5' + fg.rs + bg.rs,
6: bg.white + fg.da_green + '6' + fg.rs + bg.rs,
7: bg.white + fg.magenta + '7' + fg.rs + bg.rs,
8: bg.white + fg.black + '8' + fg.rs + bg.rs,
9: ' ',
'o': fg.black + 'o' + fg.rs,
'x': fg.black + 'x' + fg.rs,
}, lambda x, s: bg(int(x * 255.0), (255 - (int(x * 255.0))), 0) + s + bg.rs
default_style = sty_style
except ImportError:
pass
def neighbors_do(pos, height, width):
    '''Yield every in-bounds cell adjacent to pos (including pos itself).'''
    row, col = pos
    row_span = range(max(row - 1, 0), min(row + 2, height))
    col_span = range(max(col - 1, 0), min(col + 2, width))
    for r in row_span:
        for c in col_span:
            yield (r, c)


@lru_cache(maxsize=81)
def neighbors(pos, height, width):
    '''Cached list form of neighbors_do for a position and board size.'''
    return list(neighbors_do(pos, height, width))
def format_move(view, mines, pos, style=None, risk_matrix=None):
    """Render a board view as text.

    style is a (charmap, risk_colorer) pair; pos marks the current move,
    and risk_matrix (when given) colors still-hidden cells by risk.
    """
    if style is None:
        style = default_style
    charmap, colorize = style
    grid = [[charmap[cell] for cell in row] for row in view]
    if pos is not None:
        row, col = pos
        if pos in mines:
            # the move hit a mine: reveal every mine, then mark the fatal guess
            for (mi, mj) in mines:
                grid[mi][mj] = charmap['x']
        grid[row][col] = charmap['o']
    if risk_matrix is not None:
        # color only the still-hidden cells (value 9) by their estimated risk
        for i, row_vals in enumerate(view):
            for j, cell in enumerate(row_vals):
                if cell == 9:
                    grid[i][j] = colorize(risk_matrix[i][j], grid[i][j])
    return '\n'.join(''.join(chars) for chars in grid)
class Game:
    """Minesweeper game state: board size, mine positions, revealed cells."""
    def __init__(self, height, width, mines):
        self.height = height
        self.width = width
        # positions the player has revealed so far
        self.guessed = set()
        # positions holding mines
        self.mines = set()
        self.add_mines(mines)
    # Adds mines randomly.
    def add_mines(self, mines):
        """Place `mines` new mines at random positions not already mined."""
        for _ in range(mines):
            while True:
                i = random.randint(0, self.height-1)
                j = random.randint(0, self.width-1)
                pos = (i, j)
                if pos in self.mines:
                    continue
                self.mines.add(pos)
                break
    def __repr__(self):
        return format_move(self.view(), self.mines, None)
    # Returns True if a mine was hit.
    def guess(self, pos):
        """Reveal `pos`: True = mine hit, None = already revealed, False = safe."""
        if not self.guessed and pos in self.mines:
            # If the first guess was unlucky then move the mine.
            # add_mines(1) may re-place a mine at pos, hence the loop.
            while pos in self.mines:
                self.mines.remove(pos)
                self.add_mines(1)
        if pos in self.mines:
            return True
        if pos in self.guessed:
            return None
        self.spread(pos)
        return False
    def count_nearby_mines(self, pos):
        """Count mines in the neighborhood of `pos` (the neighborhood
        includes pos itself)."""
        result = 0
        for n in neighbors(pos, self.height, self.width):
            if n in self.mines:
                result += 1
        return result
    def spread(self, pos):
        '''spreads a guess out'''
        # recursive flood-fill reveal; depth can approach height*width on a
        # wide-open board, which is fine for small grids
        if pos in self.guessed:
            return
        self.guessed.add(pos)
        if self.count_nearby_mines(pos) > 0:
            return
        for n in neighbors(pos, self.height, self.width):
            self.spread(n)
    def is_won(self):
        """True when every non-mine cell has been revealed."""
        return len(self.guessed) + len(self.mines) == self.height * self.width
    def view(self):
        '''machine readable representation of what's seen'''
        # 9 marks a hidden cell; revealed cells hold their neighbor-mine count
        result = np.zeros((self.height, self.width), dtype=np.int8) + 9
        for (i, j) in self.guessed:
            result[i, j] = self.count_nearby_mines((i, j))
        return result
|
19,313 | b66d239a268b9c6a496c29a799a823468cfcb53a | #WAP to accept 2 strings from the user and swap their first two character
s1=str(input("Enter the string"))
s2=str(input("Enter 2nd string"))
st1=s1[:2]
st2=s2[:2]
s1=st2+s1[2:]
s2=st1+s2[2:]
print (s1)
print (s2)
|
19,314 | 07e5759d79d147892eb228829593e80045c645bf | """Contract test cases for start."""
import json
from typing import Any
import pytest
import requests
@pytest.mark.contract
def test_create_startliste(http_service: Any) -> None:
"""Should return status 201."""
url = f"{http_service}/start"
with open("tests/files/G11KvartStart.json") as json_file:
data = json.load(json_file)
headers = {"content-type": "application/json; charset=utf-8"}
response = requests.post(url, headers=headers, json=data)
assert response.status_code == 201
@pytest.mark.contract
def test_start(http_service: Any) -> None:
"""Should return status 200 and html."""
url = f"{http_service}/start"
response = requests.get(url)
assert response.status_code == 200
assert response.headers["content-type"] == "text/html; charset=utf-8"
assert len(response.text) > 0
|
19,315 | f75d82595adc704b8da82af6fc2e63405ebc148b | import os
import unittest
import sys
import json
sys.path.append("../IFK")
os.environ['APP_SETTINGS'] = "config.TestingConfig"
from IFK import app
class TestCase(unittest.TestCase):
    """API tests for the IFK members/sections endpoints."""

    def setUp(self):
        self.app = app.test_client()

    def add_member(self, name, phone=None):
        """PUT a new member; returns (parsed body, status code)."""
        data = json.dumps({"name": name, "phone": phone})
        response = self.app.put('/members', data=data, content_type='application/json')
        return json.loads(response.data), response.status_code

    def add_section(self, code, name, leader):
        """PUT a new section; returns (parsed body, status code)."""
        data = json.dumps({"code": code, "name": name, "leader": leader})
        response = self.app.put('/sections', data=data, content_type='application/json')
        return json.loads(response.data), response.status_code

    def add_section_member(self, code, member_id):
        """PUT a member into a section; returns (parsed body, status code).

        Fixed: the original referenced an undefined ``name`` and never
        interpolated ``code`` into the URL's ``%s`` placeholder.
        """
        data = json.dumps({"member_id": member_id})
        response = self.app.put('/section/%s/members' % code, data=data, content_type='application/json')
        return json.loads(response.data), response.status_code

    def test_add_section_members(self):
        # add the member who will lead the section
        response, status = self.add_member("bosse")
        member_id = response['id']
        # add a section led by that member
        response, status = self.add_section(code="A", name="bosse", leader=member_id)
if __name__ == '__main__':
unittest.main() |
19,316 | 515dc317d2ea13a9d5961120766a77ef13751d42 | def soma(a, b):
s = a + b
print(f'A Soma de {a} + {b} é igual a {s}')
soma(4, 5)
soma(19, 100)
soma(6,11)
|
19,317 | 47402a8ab69e36334705d5cdf0fc86956949e57e | import unittest
from flask import Flask
from govuk_template.flask.mustache import GovukTemplate
from lxml.html import document_fromstring
def with_context(test):
def wrapped_test(self):
with self.app.app_context():
with self.app.test_request_context():
test(self)
return wrapped_test
class TestMustacheRendering(unittest.TestCase):
def setUp(self):
self.app = Flask('testCase')
self.app.config['TESTING'] = True
@with_context
def test_can_render_template(self):
text = GovukTemplate().render()
html = document_fromstring(text)
header = ''.join(html.xpath('//header//a/text()'))
self.assertTrue('GOV.UK' in header)
@with_context
def test_can_inject_context(self):
text = GovukTemplate().render(pageTitle="My wicked title")
html = document_fromstring(text)
title = html.xpath('//title/text()')
self.assertEqual(title, ["My wicked title"])
@with_context
def test_can_render_from_object(self):
class MyClass(object):
bodyClasses = "my-class"
def __init__(self, title):
self.pageTitle = title
def content(self):
return "<div class='my-div'>My Div</div>"
myObj = MyClass("My Title")
text = GovukTemplate().render(myObj)
html = document_fromstring(text)
title = html.xpath('//title/text()')
div = html.xpath('//div[@class="my-div"]/text()')
bodyClasses = html.xpath('//body/@class')
self.assertEqual(title, ['My Title'])
self.assertEqual(div, ['My Div'])
self.assertEqual(bodyClasses, ['my-class'])
|
19,318 | f6c1898ae8a2917a6d21aa29aa0f186b6c84245c | from typing import List
from typing import Tuple
import collections
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node with a value and optional children."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right


class Solution:
    def subtreeWithAllDeepest(self, root: TreeNode) -> TreeNode:
        """Return the smallest subtree containing all of the deepest nodes."""
        def deepest(node):
            # Returns (height, lca-of-deepest-nodes) for the subtree at node.
            if node is None:
                return 0, None
            left_h, left_lca = deepest(node.left)
            right_h, right_lca = deepest(node.right)
            if left_h == right_h:
                # deepest nodes occur on both sides: this node is their LCA
                return left_h + 1, node
            if left_h > right_h:
                return left_h + 1, left_lca
            return right_h + 1, right_lca

        _, lca = deepest(root)
        return lca
def convert2TreeNode(nums: List[int]) -> TreeNode:
    """Build a binary tree from a level-order list where 'null'/None marks a gap.

    NOTE(review): the child-index arithmetic assumes only non-None nodes
    consume child slots; this matches the sample input in this file but may
    differ from other level-order encodings — confirm before reusing.
    """
    # materialize a TreeNode (or None placeholder) per list entry
    nodes = []
    for num in nums:
        if str(num) == 'null' or num == None:
            nodes.append(None)
        else:
            node = TreeNode(num)
            nodes.append(node)
    if not nodes or len(nodes) == 0:
        return None
    curr = 0
    level = 0
    # index of the next unclaimed child slot
    next_child = 2 ** level
    while next_child < len(nodes):
        # wire up children for every node on the current level
        for i in range(curr, curr + 2 ** level):
            if nodes[i]:
                nodes[i].left = nodes[next_child] if next_child < len(nodes) else None
                nodes[i].right = nodes[next_child + 1] if next_child + 1 < len(nodes) else None
                next_child += 2
            curr = i + 1
        level += 1
    return nodes[0]
def levelOrderTraversal(root: TreeNode) -> List[int]:
ans = []
q = collections.deque([root])
while q:
node = q.popleft()
ans.append(node.val)
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
return ans
if __name__== '__main__':
solution = Solution()
nums = [3,5,1,6,2,0,8,'null','null',7,4]
root = convert2TreeNode(nums)
ans = solution.subtreeWithAllDeepest(root)
print(levelOrderTraversal(ans)) |
19,319 | e56e733ca1ab00ac59d8178e06aafc7e253b0d46 | # -*- coding: utf-8 -*-
"""
Created on Sat Feb 6 12:20:07 2021
@author: Ender
"""
"""
Script para concatenar vectores LBP con los distintos radios
R = 1,2,4,6,8
"""
import pandas as pd
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif
from sklearn.feature_selection import VarianceThreshold
# Load the per-radius LBP feature tables (R = 1, 2, 4, 6, 8) for the UTK set.
print("Importando data...")
data1= pd.read_csv("utk_dataset/LBP_bloques96x96_1.csv",header=0)
data2= pd.read_csv("utk_dataset/LBP_bloques96x96_2.csv",header=0)
data3= pd.read_csv("utk_dataset/LBP_bloques96x96_4.csv",header=0)
data4= pd.read_csv("utk_dataset/LBP_bloques96x96_6.csv",header=0)
data5= pd.read_csv("utk_dataset/LBP_bloques96x96_8.csv",header=0)
# Feature selection (NOTE(review): Fs is created but never used below)
Fs= SelectKBest(score_func=f_classif)
# List of the feature tables for R = 1...8
d= [data1,data2,data3,data4,data5]
# Extract ALL the labels
# UTK labels: gender / age / race columns
labels= data1[['genero','edad','raza']]
constant_filter = VarianceThreshold(threshold=0) # remove features with
# zero variance
X_new=[]
for i in range(0,5):
    # Drop the label columns, keep only the LBP histogram features.
    X= d[i].drop(['genero','edad','raza'],axis=1)
    #X= data.drop(['genero','raza','edad'],axis=1)
    # Remove zero-variance features
    X=constant_filter.fit_transform(X)
    # k=int(len(X[1])/2)
    # x = SelectKBest(f_classif, k=k).fit_transform(X, Y)
    x=pd.DataFrame(X)
    X_new.append(x)
    print("Data %d procesada" % i)
    print("Features:", X_new[i].shape)
# Concatenate the five radius blocks side by side and re-attach the labels.
print("Guardando features en un csv...")
Features_final=pd.concat([X_new[0],X_new[1],X_new[2],X_new[3],X_new[4],labels],axis=1)
Features_final.to_csv('LBP_utk_96x96_conc.csv',index=False)
#
|
19,320 | 945a5eab46f031d52d0e30c499587c8d1eebcdfb | # Generated by Django 3.0.2 on 2020-01-19 02:15
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename MyFavoriteMovies.idMovie to the more
    Pythonic 'movie' (data is preserved; only the column is renamed)."""

    dependencies = [
        ('movies', '0002_myfavoritemovies'),
    ]

    operations = [
        migrations.RenameField(
            model_name='myfavoritemovies',
            old_name='idMovie',
            new_name='movie',
        ),
    ]
|
19,321 | 7a0778bd2cb12c52a9cf7067d532b19353ac417c | import re
def match_regex(filename, regex):
    """Coroutine: scan *filename* bottom-up, yielding the first capture
    group of each line that matches the current regex.

    After each match the caller send()s the next regex to search for,
    and scanning resumes from where it stopped.
    """
    with open(filename) as handle:
        for candidate in reversed(handle.readlines()):
            found = re.match(regex, candidate)
            if found:
                # Hand back the capture and wait for the next pattern.
                regex = yield found.groups()[0]
def get_serials(filename):
    """Generator over the serial numbers of disks that logged an XFS error,
    walking the log file from its end via the match_regex coroutine."""
    error_pattern = 'XFS ERROR (\[sd[a-z]\])'
    scanner = match_regex(filename, error_pattern)
    # Prime the coroutine; it replies with the device of the last XFS error.
    failed_device = next(scanner)
    while True:
        # Keep scanning backwards for the bus this device sat on...
        bus_id = scanner.send('(sd \S+) {}.*'.format(re.escape(failed_device)))
        # ...then for that bus's SERIAL= line.
        serial_number = scanner.send('{} \(SERIAL=([^)]*)\)'.format(bus_id))
        yield serial_number
        # Re-arm with the error pattern to find the next failure further up.
        failed_device = scanner.send(error_pattern)
# Drive the generator over the sample log and print each failing serial.
for serial_number in get_serials('EXAMPLE_LOG.log'):
    print(serial_number)
19,322 | 5391dde94087647d41e1d9b215fa989f2eb34b4c | #! /usr/bin/python
# -*- coding: utf-8 -*-
__author__ = 'nparslow'
import json
import re
import os
import sys
import tarfile
import xml.etree.cElementTree as ET
from gensim import corpora, models, similarities, matutils
import logging
import multiprocessing
#class FileAnalyser(object):
# def __init__(self):
# manager = multiprocessing.Manager()
# self.sentences = manager.list()
# self.jobs = []
# #self.sentences = sentences
def analyseTarFile( filename, sentences):
    """Extract lemma 'sentences' from the dependency-XML members of one tar
    archive, appending results to the shared *sentences* list.

    NOTE(review): the counter caps processing at the first ~100 members —
    presumably a debugging limit; confirm before a full run.
    """
    with tarfile.open(filename, mode='r') as tar:
        #for filename in tar.getnames()[0:10]:
        counter = 0
        for tarinfo in tar:
            if counter > 100: break
            counter += 1
            filename = tarinfo.name
            basefilename = os.path.split(filename)[-1]
            #print basefilename
            # Only the disambiguated dependency files are of interest here.
            if re.match(r"^frwikipedia_\w+\.E\w+\.dis\.dep\.xml$", basefilename):
                # i.e. ignore the '.passage.xml' files
                fileobject = tar.extractfile(filename)
                #with tar.extractfile(filename) as fileobject: # with here provokes an attribute error in multiprocessor
                analyseFileWiki(fileobject, sentences)
                fileobject.close()
def analyseDirectory( inbasepath, jobs, sentences):
    """Recursively walk *inbasepath*, spawning one worker process per tar
    archive found; started processes are collected in *jobs* for join()."""
    # it's a directory:
    for element in os.listdir(inbasepath ):
        in_full_element = inbasepath + os.sep + element # note don't use 'pathsep' as it's a colon
        if os.path.isfile(in_full_element) and tarfile.is_tarfile(in_full_element):
            # One process per archive; results go into the shared list.
            p = multiprocessing.Process(target=analyseTarFile, args=(in_full_element, sentences, ))
            jobs.append(p)
            p.start()
            #self.analyseTarFile(in_full_element )
        elif os.path.isdir(in_full_element):
            # analyse the directory
            analyseDirectory(in_full_element, jobs, sentences )
def analyseAll( inbasepath):
    """Fan out over all tar archives under *inbasepath* and return the
    shared, process-safe list of extracted sentences."""
    manager = multiprocessing.Manager()
    sentences = manager.list()  # proxy list shared across worker processes
    jobs = []
    analyseDirectory(inbasepath, jobs, sentences)
    # Wait for every spawned worker before handing the results back.
    for proc in jobs:
        proc.join()
    return sentences
def analyseFileWiki( fileobject, sentences ):
    """Parse one dependency-XML file and append its content lemmas, encoded
    as 'lemma_cat' strings, to *sentences* as one tuple — but only if at
    least minLemmaQuantity lemmas survive the filters.

    Relies on the module-level globals ignorelemmas, stopwords,
    acceptedCategories and minLemmaQuantity set in the __main__ block.
    """
    #sentences.append([])
    #print "parsing:", fileobject.name
    try:
        tree = ET.parse(fileobject)
        sentence = []
        for node in tree.findall('node'):
            lemma = node.get("lemma")
            cat = node.get("cat")
            #print "lemmma", lemma
            # Keep only real content words: skip clitic/placeholder lemmas,
            # stop words, pure punctuation, and unwanted POS categories.
            if lemma not in ignorelemmas and lemma[0] != "_"\
                and lemma not in stopwords and not re.match(r'\W+', lemma, re.UNICODE)\
                and cat in acceptedCategories:
                # TODO are there others apart from these? Uw is some sort of pronoun
                #sentences[-1].append(lemma + "_" + cat)
                sentence.append(lemma + "_" + cat)
        # remove any unparseable sentences
        #if len(sentences[sentenceNumber]) == 0: del sentences[sentenceNumber]
        print(sentence)
        #if len(sentences[-1]) < minLemmaQuantity:
        #    del sentences[-1] # require at least 3 lemmas in the sentence
        if len(sentence) >= minLemmaQuantity:
            sentences.append(tuple(sentence))
        #return tuple(sentence)
    except ET.ParseError as e:
        # if the xml is unparseable (including if the file is empty) will come here
        print "Parse error on file", fileobject.name
        #return None
    #print "sents:", sentences
class MyCorpus(object):
    """Streaming gensim corpus: yields one bag-of-words vector per stored
    sentence, using the module-level *dictionary* for token ids."""
    def __init__(self, sentences):
        self.sentences = sentences  # iterable of lemma-token sequences
    def __iter__(self):
        for line in self.sentences:
            # assume there's one document per line, tokens separated by whitespace
            # NOTE(review): allow_update mutates the global dictionary while
            # iterating — flagged as problematic by the original author too.
            yield dictionary.doc2bow(line, allow_update=True) # causes problems
            #print dictionary.doc2bow(line)
if __name__=='__main__':
    logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
    # Filter configuration used by analyseFileWiki (module-level globals).
    # note can have a lemma in a best="no" (i.e. no parse found case), e.g. lemma='ne'
    stopwords = {} #{'et', 'le', 'de', 'un', u'à', u'être', u'avoir', 'que', 'ce', 'ou', 'qui', } # a set
    #ignorechars = ''',:'!.?'''
    #ignorechars = {'\'', ':', '!', '?', '-', '.', ',', '(', ')'}
    # Clitic / placeholder lemmas to drop entirely.
    ignorelemmas = {"", "cln", "Uw", "cll", "ilimp", "cld", "clr", "cla"}
    acceptedCategories = {"nc", "v", "adj", "adv"}
    # ignore pro, det, prep, coo, Infl, N2, np, strace, S, VMod, aux, unknown, comp, incise etc.
    minLemmaQuantity = 3 # min no. of lemmas to have in a sentence to keep the sentence
    dictionary = corpora.Dictionary(None, None) # initialise dictionary with no phrases and no max size
    # NOTE(review): rejects any extra CLI argument; the usage text mentioning
    # a student_name looks inherited from another script — verify.
    if len(sys.argv) != 1: # first argument is always the name of the script
        print len(sys.argv)
        print("Usage: ./lsa_with_cefle") # script to call takes a student_name as input param
        exit(1)
    inbasepath = "/home/nparslow/Documents/AutoCorrige/frwiki/"
    #fileanalyser = FileAnalyser()
    #fileanalyser.analyseAll(inbasepath)
    #sentences = fileanalyser.getSentences()
    # Multiprocess extraction of all sentences, then persist them as JSON.
    sentences = analyseAll(inbasepath)
    print sentences
    #outfilename = "/home/nparslow/Documents/fouille_de_textes/projet/lund/allSentences.json"
    outfilename = "/home/nparslow/Documents/AutoCorrige/frwiki/testrun2.json"
    with open(outfilename, 'w') as outfile:
        json.dump(tuple(sentences), outfile)
    corpus_memory_friendly = MyCorpus(sentences)
    # collect statistics about all tokens
    #dictionary = corpora.Dictionary(corpus_memory_friendly, prune_at=None)
    dictionary = corpora.Dictionary(sentences) # dictionary stores conversion integer <-> word
    # remove words that appear only once
    once_ids = [tokenid for tokenid, docfreq in dictionary.dfs.iteritems() if docfreq == 1]
    dictionary.filter_tokens(once_ids) # remove stop words and words that appear only once
    dictionary.compactify() # remove gaps in id sequence after words that were removed
    print(dictionary)
    dictionary.save('wikitest.dict')
    # to save the corpus in Market Matrix format, also SVMlight, Blei, Low possible
    corpora.MmCorpus.serialize('wikicorpus.mm', corpus_memory_friendly)
    #print(corpus_memory_friendly)
    # convert from/to sparse (also numpy conversion possible)
    #corpus = matutils.Sparse2Corpus(scipy_sparse_matrix)
    #scipy_csc_matrix = matutils.corpus2csc(corpus_memory_friendly)
    #print scipy_csc_matrix
19,323 | c4aacee166b7c321edcdb0ee35f6ecc34b45cac0 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
This experiment was created using PsychoPy2 Experiment Builder (v1.80.03), Sat Sep 20 20:39:42 2014
If you publish work using this script please cite the relevant PsychoPy publications
Peirce, JW (2007) PsychoPy - Psychophysics software in Python. Journal of Neuroscience Methods, 162(1-2), 8-13.
Peirce, JW (2009) Generating stimuli for neuroscience using PsychoPy. Frontiers in Neuroinformatics, 2:10. doi: 10.3389/neuro.11.010.2008
"""
from __future__ import division # so that 1/3=0.333 instead of 1/3=0
from psychopy import visual, core, data, event, logging, sound, gui
from psychopy.constants import * # things like STARTED, FINISHED
import numpy as np # whole numpy lib is available, prepend 'np.'
from numpy import sin, cos, tan, log, log10, pi, average, sqrt, std, deg2rad, rad2deg, linspace, asarray
from numpy.random import random, randint, normal, shuffle
import os # handy system and path functions
from psychopy.hardware.emulator import launchScan
# EXPERIMENTAL INFORMATION — session dialog, data files, MRI sync settings.
expName = u'FlashCB' # from the Builder filename that created this script
expInfo = {u'session': u'001', u'participant': u''}
dlg = gui.DlgFromDict(dictionary=expInfo, title=expName)
if dlg.OK == False: core.quit() # user pressed cancel
expInfo['date'] = data.getDateStr() # add a simple timestamp
expInfo['expName'] = expName
# OUTPUT FILENAME
filename = 'data/%s_%s_%s' %(expInfo['participant'], expName, expInfo['date'])
# An ExperimentHandler isn't essential but helps with data saving
thisExp = data.ExperimentHandler(name=expName, version='',
    extraInfo=expInfo, runtimeInfo=None,
    originPath=None,
    savePickle=True, saveWideText=True,
    dataFileName=filename)
#save a log file for detail verbose info
logFile = logging.LogFile(filename+'.log', level=logging.EXP)
logging.console.setLevel(logging.WARNING) # this outputs to the screen, not a file
endExpNow = False # flag for 'escape' or other condition => quit the exp
# MRI INFORMATION FOR SYNCHRONIZATION
MR_settings = {
    'TR': 2.000, # duration (sec) per volume
    'volumes': 5, # number of whole-brain 3D volumes / frames
    'sync': '5', # character to use as the sync timing event; assumed to come at start of a volume
    'skip': 0, # number of volumes lacking a sync pulse at start of scan (for T1 stabilization)
    'sound': False # in test mode only, play a tone as a reminder of scanner noise
    }
# FULL SCREEN WINDOW
#win = visual.Window(size=(1920, 1080), fullscr=True, screen=0, allowGUI=False, allowStencil=False,
#    monitor=u'testMonitor', color=[0,0,0], colorSpace=u'rgb',
#    blendMode=u'avg', useFBO=True,
#    )
# PARTIAL SCREEN WINDOW
win = visual.Window(size=[800,600], fullscr=False, screen=0, allowGUI=True, allowStencil=False,
    monitor=u'testMonitor', color=[0,0,0], colorSpace=u'rgb',
    blendMode=u'avg', useFBO=True,
    )
# store frame rate of monitor if we can measure it successfully
expInfo['frameRate']=win.getActualFrameRate()
if expInfo['frameRate']!=None:
    frameDur = 1.0/round(expInfo['frameRate'])
else:
    frameDur = 1.0/60.0 # couldn't get a reliable measure so guess
# Initialize components for Routine "trial"
ISI = core.StaticPeriod(win=win, screenHz=expInfo['frameRate'], name='ISI')
# PREPARE THE STIMULI
# Two counter-phase checkerboards: flashing is produced by alternating them.
grating1 = visual.GratingStim(win=win, name='grating1',
    tex=u'sqrXsqr', mask=None,
    ori=0, pos=[0, 0], size=[0.9, 0.9], sf=5, phase=0.0,
    color=[1,1,1], colorSpace=u'rgb', opacity=1,
    texRes=128, interpolate=True, depth=-1.0)
grating2 = visual.GratingStim(win=win, name='grating2',
    tex=u'sqrXsqr', mask=None,
    ori=0, pos=[0, 0], size=[0.9, 0.9], sf=5, phase=0.0,
    color=[-1,-1,-1], colorSpace=u'rgb', opacity=1,
    texRes=128, interpolate=True, depth=-2.0)
# Red fixation cross shown during rest periods.
text = visual.TextStim(win=win, ori=0, name='text',
    text=u'+', font=u'Arial',
    pos=[0, 0], height=0.2, wrapWidth=None,
    color=u'red', colorSpace=u'rgb', opacity=1,
    depth=0.0)
# TIMERS
globalClock = core.Clock()
routineTimer = core.CountdownTimer() # to track time remaining of each (non-slip) routine
# EXPERIMENTAL PARAMETERS USED (seconds / counts; editable in the dialog below)
ExpParamaters = {
    'RestDuration' : 5,
    'OnTimeDuration' : 5,
    'NBlocks' : 2,
    'flashRate' : 4 # Hertz
    }
# DISPLAY PARAMETERS FOR THE USER TO CONFIRM
infoDlg = gui.DlgFromDict(ExpParamaters, title='Experimental Parameters')
# CALCULATED PARAMETERS (true division: __future__ import makes 1/4 == 0.25)
flashPeriod = 1/ExpParamaters['flashRate'] #seconds for one B-W cycle (ie 1/Hz)
BlockDur = ExpParamaters['RestDuration'] + ExpParamaters['OnTimeDuration']
# PRESENT THE SCREEN TO WAIT FOR THE MRI TRIGGER
vol = launchScan(win, MR_settings, mode='Scan')
#vol = launchScan(win, MR_settings, globalClock=globalClock)
# CYCLE OVER THE EXPERIMENTAL BLOCKS (rest/fixation then flashing stimulus)
for BlockIndex in range(0,ExpParamaters['NBlocks'],1):
    # Start the timer
    # NOTE(review): the clock is reset every block, yet ElapsedTime below is
    # cumulative (BlockIndex*BlockDur + ...) — so block k's rest period lasts
    # k*BlockDur longer than RestDuration. Confirm this is intended.
    globalClock.reset()
    # Start block with a cross-hair
    ElapsedTime = (BlockIndex*BlockDur) + ExpParamaters['RestDuration']
    # Write the elapsed time to the log file
    thisExp.addData('ActualStartOffPeriod',globalClock.getTime())
    thisExp.addData('ExpectedStartOffPeriod',ElapsedTime-ExpParamaters['RestDuration'])
    while globalClock.getTime() < ElapsedTime:
        text.draw()
        win.flip()
    ElapsedTime += ExpParamaters['OnTimeDuration']
    thisExp.addData('ActualStartOnPeriod',globalClock.getTime())
    thisExp.addData('ExpectedStartOnPeriod',ElapsedTime-ExpParamaters['OnTimeDuration'])
    thisExp.nextEntry()
    # ON period: alternate the two checkerboards at flashRate Hz until the
    # period ends or the participant presses escape.
    while globalClock.getTime() < ElapsedTime and not 'escape' in event.getKeys():
        if (globalClock.getTime()%flashPeriod) < (flashPeriod/2.0):# (NB more accurate to use number of frames)
            stim = grating1
        else:
            stim = grating2
        #stim.setOri(t*rotationRate*360.0)
        stim.draw()
        win.flip()
        if event.getKeys(keyList=["escape"]):
            core.quit()
# FINAL REST PERIOD: show the fixation cross, then shut down cleanly.
# BUG FIX: this value was assigned to a misspelled 'ElaspedTime', so the
# loop below compared against the stale ElapsedTime from the last block and
# the final fixation period never ran.
ElapsedTime = (ExpParamaters['NBlocks']*BlockDur) + ExpParamaters['RestDuration']
while globalClock.getTime() < ElapsedTime:
    text.draw()
    win.flip()
    if event.getKeys(keyList=["escape"]):
        core.quit()
# completed 5 repeats of 'BlockLoop'
win.close()
core.quit()
|
19,324 | b9e2a73b7dc29a5e7d3bf76cbf740535dbe77da4 | """
Created on Oct 20, 2015
@author: ahmadjaved.se@gmail.com
"""
from math import ceil
from sqlalchemy import func
from .exceptions import PageNotAnInteger, EmptyPage
class Paginator(object):
    """
    This class helps you to manage data with pagination. This class will fetch
    data in pages. That is, instead of fetching all the records from database
    at a time, this class will fetch defined number of records at a time.
    So that you can perform particular action(s) on fetched data and then fetch
    data for next page. In this way we can get ride from the memory overloading
    problem as well.
    This class will also optimized the query for fetching total number of
    records from database against given query_set. Optimization will be applied
    only on the query that will be used for fetching total number of records.
    You can also provide the separate query in optional_count_query_set argument
    for fetching total number of records.
    ..usage::
        You can use this paginator module in python scripting code and in web
        based application code as well.
    :Example1:
        >>> from sqlalchemy_paginator import Paginator
        >>> query = session.query(MyModel)
        >>> paginator = Paginator(query, 5)
        >>> for page in paginator:
        >>>     print page.number # page number of current page in iterator
        >>>     print page.object_list # this is a list that contains the records of current page
    :Example2:
        >>> from sqlalchemy_paginator import Paginator
        >>> query = session.query(MyModel)
        >>> paginator = Paginator(query, 5)
        >>> page = paginator.page(page_number)
        >>> print page.paginator.count # to get total number of records against given query
        >>> print page.paginator.total_pages # to get total number of pages
        >>> print page.paginator.pages_range # to get range of pages in list
        >>> print page.start_index # to get index of the first object on this page
        >>> print page.end_index # to get index of the last object on this page
        >>> if page.has_previous():
        >>>     print page.previous_page_number # to get previous page number
        >>> if page.has_next():
        >>>     print page.next_page_number
    """
    def __init__(self, query_set, per_page_limit, optional_count_query_set=None,
                 allow_empty_first_page=True):
        """
        Constructor to create the paginator object.
        :param query_set: SQLAlchemy query which is used for fetching data from
                          from database.
        :type query_set: SQLAlchemy query object.
        :param per_page_limit: Required number of records in a page.
        :type per_page_limit: int.
        :param optional_count_query_set: This is a optional query set that will
                                         use to fetch the total number of
                                         records from database. If this optional
                                         query is not provided than this class
                                         will optimized query_set query and used
                                         that optimized query of getting total
                                         number of records from database.
        :type optional_count_query_set: SQLAlchemy query object.
        :param allow_empty_first_page: If this flag is true and there is no
                                       data in database against given query then
                                       it will return empty list on getting
                                       first page otherwise this will raise
                                       EmptyPage error. Default value of this
                                       parameter is true.
        :type allow_empty_first_page: bool.
        """
        self.query_set = query_set
        self.per_page_limit = per_page_limit
        self.optional_count_query_set = optional_count_query_set
        self.allow_empty_first_page = allow_empty_first_page
        # Lazily-computed caches for total_pages and count.
        self.__total_pages = self.__count = None
        self.__iter_page = 1

    def __iter__(self):
        """The __iter__ returns the iterator object and is implicitly called at
        the start of loops"""
        self.__iter_page = 1
        return self

    def __next__(self):
        """Returns the next page and is implicitly called at each loop
        increment."""
        if self.__iter_page > self.total_pages:
            raise StopIteration
        page = self.page(self.__iter_page)
        self.__iter_page += 1
        return page

    next = __next__  # Python 2 iterator protocol compatibility

    def validate_page_number(self, page_number):
        """
        This method valid that if given page number is valid or not. Like page
        number should be integer and greater than zero and should not be greater
        than total number of pages.
        :param page_number: Required page number against which you want to fetch
                            records from database.
        :type page_number: int.
        :return: If given page number is valid then return it.
        :rtype: int.
        ..warning::
            This function can raise the following exceptions
            - PageNotAnInteger
            - EmptyPage
        """
        try:
            page_number = int(page_number)
        except (TypeError, ValueError):
            # BUG FIX: int(None) raises TypeError, which previously escaped
            # to the caller instead of becoming PageNotAnInteger.
            raise PageNotAnInteger('That page number is not an integer')
        if page_number < 1:
            raise EmptyPage('That page number is less than 1')
        if page_number > self.total_pages:
            if page_number == 1 and self.allow_empty_first_page:
                pass
            else:
                raise EmptyPage('That page contains no results')
        return page_number

    def page(self, page_number):
        """
        Returns a page object against given page number if given page number is
        valid.
        :param page_number: Required page number against which you want to fetch
                            records from database.
        :type page_number: int.
        :return: Page object that contains the records against given page
                 number.
        :rtype: Page.
        ..seealso::
            - Page class
            - Paginator.validate_page_number()
        ..warning::
            This function can raise the following exceptions
            - PageNotAnInteger
            - EmptyPage
        """
        page_number = self.validate_page_number(page_number)
        offset = (page_number - 1) * self.per_page_limit
        return Page(self.query_set.offset(offset).limit(self.per_page_limit).all(),
                    page_number, self)

    def __get_count(self):
        """
        Returns the total number of objects, across all pages.
        :return: Total number of records against given query.
        :rtype: int.
        ..info::
            If optional_count_query_set is given then this function will use
            query for fetching total number records otherwise query_set query
            will be used for fetching total number records.
        """
        if self.__count is None:
            if self.optional_count_query_set is None:
                # Drop ORDER BY for the count query — it only slows it down.
                self.optional_count_query_set = self.query_set.order_by(None)
            count_query = self.optional_count_query_set.statement.with_only_columns([func.count()])
            self.__count = self.optional_count_query_set.session.execute(count_query).scalar()
        return self.__count
    count = property(__get_count)

    def __get_total_pages(self):
        """
        Returns the total number of pages.
        :return: Total number of pages against given query.
        :rtype: int.
        ..info::
            If total number of records is zero and allow_empty_first_page is
            true then returns 1 instead of 0.
        """
        if self.__total_pages is None:
            if self.count == 0 and not self.allow_empty_first_page:
                self.__total_pages = 0
            else:
                hits = max(1, self.count)
                self.__total_pages = int(ceil(hits / float(self.per_page_limit)))
        return self.__total_pages
    total_pages = property(__get_total_pages)

    def __pages_range(self):
        """
        Returns a range of pages.
        :return: List that contains range of pages.
        :rtype: list.
        """
        return range(1, self.total_pages + 1)
    pages_range = property(__pages_range)
class Page(object):
    """
    One page of results plus navigation helpers, mirroring Django's
    paginator Page API. Holds the fetched objects, the 1-based page
    number, and a back-reference to the owning Paginator.
    """

    def __init__(self, object_list, number, paginator):
        self.object_list = object_list
        self.number = number
        self.paginator = paginator

    def __repr__(self):
        return '<Page %s of %s>' % (self.number, self.paginator.total_pages)

    def has_next(self):
        """True when a page exists after this one."""
        return self.number < self.paginator.total_pages

    def has_previous(self):
        """True when a page exists before this one."""
        return self.number > 1

    def has_other_pages(self):
        """True when this is not the only page."""
        return self.has_previous() or self.has_next()

    @property
    def next_page_number(self):
        return self.number + 1

    @property
    def previous_page_number(self):
        return self.number - 1

    @property
    def start_index(self):
        """
        Returns the index of the first object on this page,
        relative to total objects in the paginator.
        """
        # Special case, return zero if no items.
        if self.paginator.count == 0:
            return 0
        return self.paginator.per_page_limit * (self.number - 1) + 1

    @property
    def end_index(self):
        """
        Returns the index of the last object on this page,
        relative to total objects found (hits).
        """
        # Special case for the last page, which may be partially filled.
        if self.number == self.paginator.total_pages:
            return self.paginator.count
        return self.number * self.paginator.per_page_limit
|
19,325 | 2abf2ee9c8291c3354cdfced577ba14167da3e62 | # Generated by cx_Freeze
import PySide2
from PySide2.support.signature.mapping import Default, Instance, Invalid, Missing, Virtual
import shiboken2
import PySide2.QtCore
import PySide2.QtPositioning
import PySide2.QtLocation
from PySide2.support.signature import typing
|
19,326 | 9f65bb5ee1387354ec002d97bc682a74b7cc970d | # External modules
import matplotlib
matplotlib.use('GTKAgg')
import matplotlib.pyplot as plt
# Internal Modules
import plotClasses
from plotCommands import *
#############################################################################
#############################################################################
# INPUT
# Plot-spec dicts (defined in plotCommands) to render, one axes each.
plots = [ZnPdconvEng,ZnPdEngAbsDiff,ZnPdConvEng_d]
#############################################################################
#############################################################################
def axMaker(n):
    """Create a roughly-square nrows x ncols grid of n matplotlib axes and
    return them as a flat list.

    nrows is the 'middle' divisor of n, so e.g. n=6 gives a 3x2 grid.
    """
    divs = [i for i in range(1,n+1) if n%i==0]
    # Floor division: plain '/' breaks on Python 3 here (float list index
    # and float ncols); '//' is identical on Python 2.
    nrows = divs[len(divs)//2]
    ncols = n // nrows
    f,axs = plt.subplots(nrows=nrows,ncols=ncols)
    if n == 1: return [axs]   # subplots() returns a bare Axes for 1x1
    elif n <= 3: return axs   # 1-D array of Axes (nrows x 1)
    else: return [item for sublist in axs for item in sublist]  # flatten 2-D
def maker(pfdict):
    """Take plotFunc dictionary from plotCommands. Feed into plotfuncmaker from plotClasses"""
    # pop('maker') both selects the factory and removes the key, so the
    # remaining entries can be splatted as keyword arguments.
    f = plotClasses.makerDict[pfdict.pop('maker')]
    return f.makeFunc(**pfdict)
#############################################################################
#############################################################################
def main():
    """Render every configured plot on its own axes, then show the figure."""
    axs = axMaker(len(plots))
    for i,p in enumerate(plots): maker(p).plot(axs[i])
    plt.show()

if __name__ == '__main__': main()
19,327 | 0d53098b17176ca6da2597d36aecb34fe280fc02 | #!/usr/bin/env python
# ==============================================================================
# = quick script that takes a file full of dataset containers paired with
# = dataset sizes and breaks up the datasets to be divided amongst out eos
# = spaces.
# ==============================================================================
# ==============================================================================
class UserData(object):
    """One user's EOS quota: tracks datasets allocated against max_size.

    NOTE(review): units of max_size/current_size are not stated — presumably
    the same unit as DataSet.size from the input file; confirm.
    """
    def __init__(self, user_name = 'user', max_size = 1000):
        self.user_name = user_name
        self.max_size = max_size       # capacity available to this user
        self.current_size = 0          # running total of allocated sizes
        self.ds_list = []              # DataSet objects assigned so far
    def addSample(self, ds):
        # Record the dataset and grow the running allocation total.
        self.ds_list.append(ds)
        self.current_size += ds.size
    def printUserInfo(self):
        # Summary only (no dataset names).
        print 'user name: ' , self.user_name
        print '  max size:       ' , self.max_size
        print '  allocated size: ' , self.current_size
        print '  number samples: ' , len(self.ds_list)
    def printUserDataSets(self):
        # Summary plus the full list of assigned dataset names.
        self.printUserInfo()
        print '  DS list:'
        for ds in self.ds_list:
            print '    %s' % ds.name
        print ''
    def printUserDataSetsToFile(self, out_file_name):
        # Same report as printUserDataSets, written to a file instead.
        out_file = file(out_file_name, 'w')
        out_file.write('user name: %s\n' % self.user_name)
        out_file.write('  max size:       %s\n' % self.max_size)
        out_file.write('  allocated size: %s \n' % self.current_size)
        out_file.write('  number samples: %s \n' % len(self.ds_list))
        out_file.write('\n')
        for ds in self.ds_list:
            out_file.write('  %s\n' % ds.name)
        out_file.close()
# ==============================================================================
class DataSet(object):
    """Plain record describing one dataset container: its name, how many
    files it holds, and its total size."""

    def __init__(self, ds_name, num_files, ds_size):
        # Constructor-argument names are kept for callers; the rest of the
        # script reads the attributes name / num_files / size.
        self.name, self.num_files, self.size = ds_name, num_files, ds_size
# ------------------------------------------------------------------------------
def getDSList(ds_list_file_name):
    """Parse lines of '<name> <num_files> <size>' into DataSet objects."""
    ds_list = []
    # NOTE(review): Python 2 'file' builtin; the handle is never closed.
    f = file(ds_list_file_name)
    for l in f.readlines():
        splits = l.split()
        ds_list.append( DataSet( splits[0]
                               , int(splits[1])
                               , float(splits[2])
                               )
                      )
    return ds_list
# ------------------------------------------------------------------------------
def fillUserLists(user_list, ds_list):
    """First-fit allocation: each dataset goes to the first user with room;
    unplaceable datasets are reported, then every user's summary printed."""
    # loop over data-sets
    for ds in ds_list:
        ds_allocated = False
        # look at user list. assign this ds to the first user who can hold it
        for user in user_list:
            if user.current_size + ds.size < user.max_size:
                print 'Adding sample %s to user %s' % (ds.name, user.user_name)
                user.addSample(ds)
                ds_allocated = True
                break
        if not ds_allocated:
            print "ERROR: We don't have space for sample " , ds.name
    for user in user_list:
        user.printUserInfo()
        print ''
# ------------------------------------------------------------------------------
def main():
    """Allocate the TNT_107 container list across the four users' EOS areas
    and write one 'files.<user>.txt' report per user."""
    user_list = [
          UserData('evelyn', 900)
        , UserData('leigh' , 900)
        , UserData('liz'   , 800)
        , UserData('brett' , 700)
        ]
    total_space = reduce( lambda a, b: a+b , [x.max_size for x in user_list] )
    print 'total available space: ' , total_space
    ds_list = getDSList('ContainerSizes.TNT_107.txt')
    fillUserLists(user_list, ds_list)
    for user in user_list:
        user.printUserDataSets()
        user.printUserDataSetsToFile('files.%s.txt' % user.user_name)

# ==============================================================================
if __name__ == '__main__':
    main()
|
19,328 | ad6c0eb3f94fb4376084989585d657e6178ceb9c | #!/usr/bin/python3
# -*- coding: utf-8 -*-
'''
AC
'''
import sys
import time
import collections
import string
class Solution:
    """LeetCode 127 Word Ladder, solved with a plain breadth-first search."""

    def ladderLength(self, beginWord, endWord, wordList):
        """Length (number of words, inclusive) of the shortest transformation
        sequence from beginWord to endWord, or 0 when none exists."""
        frontier = collections.deque([(beginWord, 1)])
        alphabet = string.ascii_lowercase
        seen = set()
        words = set(wordList)  # O(1) membership tests
        while frontier:
            current, steps = frontier.popleft()
            if current == endWord:
                return steps
            # Try every single-letter mutation of the current word.
            for pos in range(len(current)):
                for letter in alphabet:
                    if letter == current[pos]:
                        continue
                    candidate = current[:pos] + letter + current[pos + 1:]
                    if candidate in words and candidate not in seen:
                        seen.add(candidate)
                        frontier.append((candidate, steps + 1))
        return 0
if __name__ == "__main__":
    # High-resolution counter on Python 3, time.time fallback on Python 2.
    if sys.version_info.major == 3:
        t0 = time.perf_counter()
    else:
        t0 = time.time()
    test_list = ["zb"]
    test_list_2 = ["ad"]
    # Word list: every two-letter word a[a-z] ... y[a-z] (650 words), so
    # "zb" -> "ab" -> "ad" gives the expected ladder length of 3.
    test_word_list = [chr(a)+chr(b) for a in range(ord("a"),ord("y")+1) for b in range(ord("a"),ord("z")+1)]
    answer_list = [3]
    test = Solution()
    for i in range(len(test_list)):
        out_t = test.ladderLength(test_list[i], test_list_2[i], test_word_list)
        if out_t == answer_list[i]:
            print("\033[1;32;40m Pass \033[0m")
        else:
            print(
                "\033[1;31;40m Fail!!\033[0m\033[0;34;40m out \"%s\" should \"%s\" by \"%.50s\" "
                % (out_t, answer_list[i],
                   str(test_list[i]) + " " + str(test_list_2[i])))
    if sys.version_info.major == 3:
        print("\nRun Time is %f s" % (time.perf_counter() - t0))
    else:
        print("\nRun Time is %f s" % (time.time() - t0))
'''
from collections import deque
class Solution(object):
def ladderLength(self, beginWord, endWord, wordList):
def construct_dict(word_list):
d = {}
for word in word_list:
for i in range(len(word)):
s = word[:i] + "_" + word[i+1:]
d[s] = d.get(s, []) + [word]
return d
def bfs_words(begin, end, dict_words):
queue, visited = deque([(begin, 1)]), set()
while queue:
word, steps = queue.popleft()
if word not in visited:
visited.add(word)
if word == end:
return steps
for i in range(len(word)):
s = word[:i] + "_" + word[i+1:]
neigh_words = dict_words.get(s, [])
for neigh in neigh_words:
if neigh not in visited:
queue.append((neigh, steps + 1))
return 0
if endWord not in wordList:
return 0
d = construct_dict(set(wordList) | set([beginWord, endWord]))
return bfs_words(beginWord, endWord, d)
'''
|
19,329 | 24e1a49aca6cac2a09e81d35ad446c4cc5b54581 | import asyncio
async def test1():
    # Await test2 first, then report test1's own completion.
    await test2()
    print('This is test1')
async def test2():
    # Leaf coroutine: just prints; contains no await so it never suspends.
    print("This is test2")
if __name__ =="__main__":
    # BUG FIX: the old `test1().send(None)` drove the coroutine by hand and
    # always died with an unhandled StopIteration once test1 finished
    # (neither coroutine ever suspends). Let the event loop run it instead.
    asyncio.run(test1())
19,330 | 8224f5ce766d1e8208584aa5ec3f52e5bd45885f | # Generated by Django 3.0.1 on 2020-06-27 13:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: relax the Todo model — completed, order and url all
    become nullable (default None) and optional in forms (blank=True)."""

    dependencies = [
        ('todo_api', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='todo',
            name='completed',
            field=models.BooleanField(blank=True, default=None, null=True),
        ),
        migrations.AlterField(
            model_name='todo',
            name='order',
            field=models.IntegerField(blank=True, default=None, null=True),
        ),
        migrations.AlterField(
            model_name='todo',
            name='url',
            field=models.CharField(blank=True, default=None, max_length=500, null=True),
        ),
    ]
|
19,331 | a644aa4f93fda5704182ba4eab0aa7a21633dbb4 | import torch
import torch.nn as nn
from modules.vgg16 import SkipVGG16
from modules.unet.unet_model import UNet
# from modules.renet import ReNet
from modules.attenet2 import DecoderLayer
from modules.dcgan_decoder import DcganDecoder
from modules.MobileNetDenseASPP import DenseASPP, _DenseAsppBlock
from modules.utils import make_position_encoding, AttentionLayer
import numpy as np
import config
class ReSeg(nn.Module):
    r"""ReSeg Module (with modifications) as defined in 'ReSeg: A Recurrent
    Neural Network-based Model for Semantic Segmentation'
    (https://arxiv.org/pdf/1511.07053.pdf).
    * VGG16 with skip Connections as base network
    * Two ReNet layers
    * Two transposed convolutional layers for upsampling
    * Three heads for semantic segmentation, instance segmentation and
    instance counting.
    Args:
        n_classes (int): Number of semantic classes
        use_instance_seg (bool, optional): If `False`, does not perform
            instance segmentation. Default: `True`
        pretrained (bool, optional): If `True`, initializes weights of the
            VGG16 using weights trained on ImageNet. Default: `True`
        use_coordinates (bool, optional): If `True`, adds coordinate
            information to input image and hidden state. Default: `False`
        usegpu (bool, optional): If `True`, runs operations on GPU
            Default: `True`
    Shape:
        - Input: `(N, C_{in}, H_{in}, W_{in})`
        - Output:
            - Semantic Seg: `(N, N_{class}, H_{in}, W_{in})`
            - Instance Seg: `(N, 32, H_{in}, W_{in})`
            - Instance Cnt: `(N, 1)`
    Examples:
        >>> reseg = ReSeg(3, True, True, True, False)
        >>> input = torch.randn(8, 3, 64, 64)
        >>> outputs = reseg(input)
        >>> reseg = ReSeg(3, True, True, True, True).cuda()
        >>> input = torch.randn(8, 3, 64, 64).cuda()
        >>> outputs = reseg(input)
    """
    def __init__(self, n_classes, use_instance_seg=True,
                 pretrained=True,
                 use_coordinates=False, use_wae=True, usegpu=True, training=True):
        super(ReSeg, self).__init__()
        # The backbone is hard-coded; the VGG16/ReNet variants mentioned in
        # the docstring are not wired in here.
        self.backbone = 'Unet'
        self.n_classes = n_classes
        self.use_instance_seg = use_instance_seg
        # NOTE(review): n_channels=21 is hard-coded -- presumably the input
        # carries extra channels beyond RGB; confirm against the caller.
        self.base = UNet(n_channels=21)
        self.use_wae = use_wae
        # NOTE(review): this shadows nn.Module's built-in ``training`` flag
        # (normally managed by .train()/.eval()) -- confirm intentional.
        self.training = training
        self.decoder = DecoderLayer()
        # if self.use_wae:
        #     self.decoder = DcganDecoder(decoder_opt)
        # Decoder
        if self.backbone == 'Unet':
            # Semantic Segmentation head: channel attention followed by a
            # 1x1 conv projecting down to the class scores.
            self.channelAttend = AttentionLayer(self.base.n_filters)
            self.sem_seg_output = nn.Conv2d(self.base.n_filters,
                                            self.n_classes, kernel_size=(1, 1),
                                            stride=(1, 1))
            # Instance Segmentation head: MobileNet-style separable conv
            # (depthwise 3x3 + pointwise 1x1) into d_model channels.
            if self.use_instance_seg:
                self.ins_seg_output_1 = nn.Sequential(
                    nn.Conv2d(self.base.n_filters, self.base.n_filters,
                              kernel_size=(3, 3),
                              stride=(1, 1), padding=(1, 1),
                              groups=self.base.n_filters),
                    nn.BatchNorm2d(self.base.n_filters),
                    nn.ReLU6(),
                    nn.Conv2d(self.base.n_filters, config.d_model,  # * 2
                              kernel_size=(1, 1),
                              stride=(1, 1)),
                    nn.BatchNorm2d(config.d_model),
                    nn.ReLU6()
                )
                # Bottleneck refinement block; used as a residual branch in
                # forward() (output is added back onto its input).
                self.ins_seg_output_2 = nn.Sequential(
                    nn.Conv2d(config.d_model, config.d_model*2, kernel_size=(1, 1), stride=(1, 1)),
                    nn.BatchNorm2d(config.d_model*2),
                    nn.ReLU6(),
                    nn.Conv2d(config.d_model*2, config.d_model*2, kernel_size=(3, 3), stride=(1, 1),
                              padding=(1, 1),
                              groups=config.d_model*2),
                    nn.BatchNorm2d(config.d_model*2),
                    nn.ReLU6(),
                    nn.Conv2d(config.d_model*2, config.d_model, kernel_size=(1, 1), stride=(1, 1)),
                    nn.BatchNorm2d(config.d_model)
                )
        # TODO: add a 1x1 conv here (translated from original comment)
        self.positioin_encoding = None

    def forward(self, training, *_input):
        # Called either as forward(training, x, sem_seg_target,
        # ins_seg_target, N) during training, or forward(training, x) at
        # inference time.
        if len(_input) == 4:
            x, sem_seg_target, ins_seg_target, N = _input
        else:
            x = _input[0]
        if self.backbone == 'Unet':
            x_dec, *X = self.base(x)
        # Semantic Segmentation
        x_att = self.channelAttend(x_dec)
        sem_seg_out = self.sem_seg_output(x_att)
        # Use the ground-truth argmax when targets were supplied, otherwise
        # fall back to the prediction's own argmax.
        if len(_input) == 4:
            sem_seg_argmax = sem_seg_target.argmax(1).unsqueeze(1).float()
        else:
            sem_seg_argmax = sem_seg_out.argmax(1).unsqueeze(1).float()
        if self.use_instance_seg:
            x_enc = self.ins_seg_output_1(x_dec)
            # Residual refinement of the instance embedding.
            x_enc = self.ins_seg_output_2(x_enc) + x_enc
            if x_enc.shape[0]==1:
                # NOTE(review): dead statement, looks like a leftover
                # breakpoint anchor for batch-size-1 debugging.
                a = 1
            # NOTE(review): on the 1-argument (inference) path this still
            # references ins_seg_target/N, which are then undefined --
            # confirm inference always passes 4 inputs or disables
            # instance segmentation.
            ins_cost, criterion, ins_ce_loss, ins_dice_loss = self.decoder(x_enc, sem_seg_argmax, ins_seg_target, N, training, X)
            return sem_seg_out, sem_seg_argmax, ins_cost, criterion, ins_ce_loss, ins_dice_loss #, cluster
        else:
            return sem_seg_out, sem_seg_argmax

    def set_position_encoding(self, ins_seg_out):
        # Build a fixed 2-D sinusoidal position encoding matching the
        # feature map's spatial size: half the channels encode the height
        # coordinate, half the width coordinate.
        b, n_units, h, w = ins_seg_out.shape
        h_vec = np.tile(make_position_encoding(np, 1, h, n_units // 2, f=10000.)[:, :, :, np.newaxis], (1, 1, 1, w))
        w_vec = np.tile(make_position_encoding(np, 1, w, n_units // 2, f=10000.)[:, :, np.newaxis, :], (1, 1, h, 1))
        vec = np.concatenate([h_vec, w_vec], axis=1)
        return torch.from_numpy(vec).cuda()
|
def splitIntoTwo(arr):
    """Count the split points where the left part's sum strictly exceeds
    the right part's sum.

    A split after index i (0 <= i < len(arr) - 1) puts arr[:i + 1] on the
    left and arr[i + 1:] on the right.
    """
    left, right = 0, sum(arr)
    count = 0
    for value in arr[:-1]:
        left += value
        right -= value
        count += left > right
    return count
|
19,333 | 72264cbdb79c35b2fa104318a52a74d9f588dd5c | import gi
gi.require_version ("Gtk", "3.0")
from gi.repository import Gtk
class Panel (Gtk.Grid):
    """Demo grid panel: six buttons laid out with the three Gtk.Grid
    attachment styles (add, attach, attach_next_to)."""
    def __init__(self):
        Gtk.Grid.__init__(self)
        boton1 = Gtk.Button(label="Boton 1")
        boton2 = Gtk.Button(label="Boton 2")
        boton3 = Gtk.Button(label="Boton 3")
        boton4 = Gtk.Button(label="Boton 4")
        boton5 = Gtk.Button(label="Boton 5")
        boton6 = Gtk.Button(label="Boton 6")
        # add() drops the widget at the next free cell (0, 0 here).
        self.add(boton1)
        # attach(widget, column, row, width, height)
        self.attach(boton2, 1, 0, 2, 1)
        # attach_next_to(widget, sibling, side, width, height)
        self.attach_next_to(boton3, boton1, Gtk.PositionType.BOTTOM, 1, 2)
        self.attach_next_to(boton4, boton3, Gtk.PositionType.RIGHT, 2, 1)
        self.attach(boton5, 1, 2, 1, 1)
        self.attach_next_to(boton6, boton5, Gtk.PositionType.RIGHT, 1, 1)
class ExemploGtkStack(Gtk.Window):
    """Demo window showing Gtk.Stack with a Gtk.StackSwitcher: three pages
    (check button, markup label, button panel) with a slide transition."""
    def __init__(self):
        Gtk.Window.__init__ (self, title = "Exemplo con Gtk.Stack e Gtk.StackSwicher")
        self.set_size_request (200, 100)
        caixaV = Gtk.Box (orientation = Gtk.Orientation.VERTICAL, spacing = 6)
        self.add (caixaV)
        stack = Gtk.Stack ()
        stack.set_transition_type (Gtk.StackTransitionType.SLIDE_LEFT_RIGHT)
        # Transition duration in milliseconds.
        stack.set_transition_duration (1000)
        botonChequeo = Gtk.CheckButton (label = "Púlsame!")
        stack.add_titled (botonChequeo, "Chequeo", "Botón de chequeo")
        stack_switcher = Gtk.StackSwitcher ()
        stack_switcher.set_stack (stack)
        etiqueta = Gtk.Label ()
        etiqueta.set_markup ("<big>Etiqueta elegante</big>")
        stack.add_titled (etiqueta, "Etiqueta", "Unha etiqueta")
        panel = Panel()
        stack.add_titled (panel, "Panel", "Panel de traballo")
        # NOTE(review): set_stack() was already called above -- this second
        # call is redundant (harmless, but could be removed).
        stack_switcher.set_stack (stack)
        caixaV.pack_start(stack, True, True, 0)
        caixaV.pack_start (stack_switcher, True, True, 0)
        self.connect ("delete-event", Gtk.main_quit)
        self.show_all()
if __name__ == "__main__":
ExemploGtkStack ()
Gtk.main() |
19,334 | 68b160f8dbc2d54bc843e1e6156f517264296038 | import os
import json
import boto3
import logging
import requests
from routes.endpoints import app_config
# Module-level CloudFormation client; credentials and region come from the
# environment (note: the env var names say S3 but the keys are used for
# CloudFormation access).
client = boto3.client(
    "cloudformation",
    aws_access_key_id=os.environ.get("AWS_S3_ACCESS_KEY"),
    aws_secret_access_key=os.environ.get("AWS_S3_SECRET_KEY"),
    region_name=os.environ.get("REGION")
)
# - setup logging so i can see what is going on in AWS - #
log = logging.getLogger(__name__)
#-------------------------------------------------------#
def filter_stacks(stacks) -> dict:
    """
    filter out stacks that do not have "stack-finder" in the tags
    this will eliminate bloated stacks from stack finder
    - input:
        - stacks: stacks that are called with boto3 api
    - output:
        - filters stacks dictionary with only tagged stacks
    """
    # An empty/unset tag filter means "keep everything".
    if not app_config.TAGS:
        return stacks
    keep = list()
    for stack in stacks["Stacks"]:
        for tag in stack["Tags"]:
            for filter_tag in app_config.TAGS:
                # filter_tag may match either the tag's Key or Value.
                if filter_tag in tag.values():
                    # NOTE(review): a stack matching several configured tags
                    # is appended once per match -- confirm duplicates are
                    # acceptable downstream.
                    keep.append(stack)
    # Mutates the input dict in place and also returns it.
    stacks["Stacks"] = keep
    return stacks
def get_stack_name_stack_group(stacks) -> list:
    """Return one record per stack: its name plus, when present, the group
    taken from the "stack-finder" tag.  (Kept for callers; superseded by
    the newer tag-grabbing helpers.)

    - input:
        - stacks: list of stack dicts with "StackName" and "Tags"
    - output:
        - list of {"stack": ..., "group": ...} records
    """
    records = []
    for entry in stacks:
        record = {"stack": entry["StackName"]}
        finder_values = [tag["Value"] for tag in entry["Tags"]
                         if tag["Key"] == "stack-finder"]
        if finder_values:
            # Last matching tag wins, mirroring the original overwrite loop.
            record["group"] = finder_values[-1]
        records.append(record)
    return records
def get_stacks() -> dict:
    """
    get stacks from aws
    - output:
        - dictionary object containing stack information
    """
    # NOTE(review): describe_stacks() is not paginated here -- assumes the
    # account stays under one page of results; confirm.
    return filter_stacks(
        stacks=client.describe_stacks()
    )
def get_tuples_helper(element) -> list:
    """Return the [name, value] pair for one CloudFormation output.

    Prefers the export name; falls back to the plain output key when the
    output is not exported.
    - input:
        - element: object from outputs list
    - output:
        - list of export/output name and output value
    """
    try:
        return [element["ExportName"], element["OutputValue"]]
    except KeyError:
        # Fixed: the previous bare ``except:`` also swallowed unrelated
        # errors (TypeError, KeyboardInterrupt, ...).  Only a missing
        # "ExportName" key should trigger the fallback.
        return [element["OutputKey"], element["OutputValue"]]
def get_tuples(outputs) -> list:
    """Build [name, value] pairs for every CloudFormation output.

    - input:
        - outputs: list object containing key value pairs
    - output:
        - list of [export-or-output name, value] pairs
    """
    return [get_tuples_helper(entry) for entry in outputs]
def format_stack_outputs(outputs) -> list:
    """
    NOTE: the output will be different for html/javascript
    get key value pairs formated for output
    - input:
        - outputs: an array of dictionarys
    - output:
        - list of [name, value] pairs to be rendered by the frontend
    """
    # Fixed: the annotation claimed ``str`` but a list has always been
    # returned (the "<br/>"-joined string output described above was
    # removed at some point).
    return get_tuples(outputs)
def build_bucket_url(bucket_name) -> str:
    """Build the AWS console URL for an S3 bucket.

    - input:
        - bucket_name: name of the bucket to redirect to
    - output:
        - string url that will be used by the frontend
    """
    return f"https://s3.console.aws.amazon.com/s3/buckets/{bucket_name}"
def build_logs_url(log_name) -> str:
    """Build the CloudWatch console URL for a log group (us-west-2 is
    hard-coded).

    - input:
        - log_name: name of the logs we are interested in
    - output:
        - string url for the front end to call
    """
    return ("https://us-west-2.console.aws.amazon.com/cloudwatch/home"
            f"?region=us-west-2#logStream:group={log_name}")
def build_service_url(cluster_name, service_name) -> str:
    """Build the ECS console URL for a service (us-west-2 is hard-coded).

    - input:
        - cluster_name: cluster name output from cloudformation
        - service_name: service name output from cloudformation
    - output:
        - string url that will be used by the frontend to redirect to
    """
    return ("https://us-west-2.console.aws.amazon.com/ecs/home?region=us-west-2"
            f"#/clusters/{cluster_name}/services/{service_name}/details")
def get_user_name(_cache_user) -> str:
    """Return the display username from the cached AD user, or "Testing"
    when running locally (missing key or no cached user at all).

    - input:
        - _cache_user: user that has been stored in the cache from AD
    - output:
        - username or "Testing" if run locally
    """
    try:
        return _cache_user["preferred_username"]
    except (KeyError, TypeError):
        # KeyError: cached dict without the field; TypeError: no cache.
        return "Testing"
def perform_request(endpoint, token) -> dict:
    """
    perform request to get the data we seek
    - input:
        - endpoint: endpoint to pull request from
        - token: token received from adfs server
    - output:
        - response for graph about the data we want
    """
    # NOTE(review): no timeout and no status check -- a hung endpoint
    # blocks the caller and a non-JSON error body raises from .json();
    # consider requests.get(..., timeout=...) and raise_for_status().
    return requests.get(endpoint, headers={"Authorization": "Bearer "+token["access_token"]}).json()
def get_all_group_ids(token) -> list:
    """
    loop until we have grabbed all the user
    groups ids
    - input:
        - token: token that has been provided from the server
    - output:
        - list of group ids to be checked against our list
    """
    ids=list()
    _dict = perform_request(app_config.ENDPOINT, token)
    # Microsoft Graph paginates results; follow "@odata.nextLink" until
    # the final page (which has no next link).
    while True:
        for obj in _dict["value"]:
            ids.append(obj["id"])
        if "@odata.nextLink" not in _dict:
            return ids
        _dict = perform_request(_dict["@odata.nextLink"], token)
def perform_graph_call(token, user) -> bool:
    """
    perform Azure Graph API call to see what groups the user is
    apart of. This list of acceptable GROUP_IDs are located in the
    app_config
    - input:
        - token: o auth token used to query Graph API
        - user: cached user (unused; kept for interface compatibility)
    - output:
        - boolean: determines if the user is apart of the group specified
    """
    # Fixed: a duplicate perform_request() call was made here whose result
    # was never used -- get_all_group_ids() already performs the initial
    # request.  Also build the membership set once instead of on every
    # loop iteration.
    member_ids = set(get_all_group_ids(token))
    return any(_id in member_ids for _id in app_config.GROUP_ID)
def _grab_tag(tag_name, stack) -> str:
"""
"""
for tag in stack["Tags"]:
if tag_name == tag["Key"]:
return tag["Value"]
return ""
def get_stack_tags_helper(stack) -> dict:
    """Format one stack's tag data for the UI.

    - input:
        - stack: stack element from the aws list
    - output:
        - dict of name/group/deployment-date/build-tag for tagged stacks;
          NOTE: for an untagged stack the bare stack *name* (a str) is
          returned instead, despite the dict annotation.
    """
    if not stack["Tags"]:
        return stack["StackName"]
    return {
        "stack": stack["StackName"],
        "group": _grab_tag("stack-finder", stack),
        "deployment_date": _grab_tag("deployment-date", stack),
        "build_tag": _grab_tag("build-tag", stack)
    }
def get_stack_tags(stacks) -> list:
    """Grab the display tags for every stack, tolerating stacks with no
    tags at all.

    - input:
        - stacks: list of stacks we grabbed from the aws cli
    - output:
        - list of tag records to be displayed on the ui
    """
    return [get_stack_tags_helper(stack) for stack in stacks]
|
# Print the numbers 12..20 separated by spaces, skipping 16, then report
# that the loop finished.
for it in range(12, 21):
    if it == 16:
        continue
    print(it, end=' ')
print("while loop completed")
19,336 | 118ecc5dc753590532ae3aeea85d9355ee940f38 | """
Definition of TreeNode:
class TreeNode:
def __init__(self, val):
self.val = val
self.left, self.right = None, None
"""
class TreeNode:
    """Binary tree node with a value and left/right children."""
    def __init__(self, val):
        self.val = val
        self.left, self.right = None, None

class Solution:
    def recurse(self, preorder, inorder):
        """Legacy O(n^2) helper, kept for external callers: consumes
        ``preorder`` from the front and splits ``inorder`` around the
        root found via a linear ``index`` search."""
        if not inorder:
            return None
        rootval = preorder.pop(0)
        root = TreeNode(rootval)
        rootidx = inorder.index(rootval)
        root.left = self.recurse(preorder, inorder[:rootidx])
        root.right = self.recurse(preorder, inorder[rootidx + 1:])
        return root

    """
    @param preorder : A list of integers that preorder traversal of a tree
    @param inorder : A list of integers that inorder traversal of a tree
    @return : Root of a tree
    """
    def buildTree(self, preorder, inorder):
        """Reconstruct the tree from its preorder and inorder traversals.

        Improved to O(n): a value->index map over ``inorder`` replaces the
        repeated linear searches, and an index cursor replaces the O(n)
        ``preorder.pop(0)`` calls.  Assumes unique node values (as the
        classic problem guarantees).  Unlike the old version, the caller's
        ``preorder`` list is no longer emptied as a side effect.

        Returns None for empty input or mismatched traversal lengths.
        """
        if len(preorder) == 0 or len(preorder) != len(inorder):
            return None
        position = {val: i for i, val in enumerate(inorder)}
        cursor = 0  # next unused index into preorder

        def build(lo, hi):
            """Build the subtree covering inorder[lo..hi] inclusive."""
            nonlocal cursor
            if lo > hi:
                return None
            rootval = preorder[cursor]
            cursor += 1
            node = TreeNode(rootval)
            mid = position[rootval]
            node.left = build(lo, mid - 1)
            node.right = build(mid + 1, hi)
            return node

        return build(0, len(inorder) - 1)
|
19,337 | 8e9bd1d5181306bb0c07294eeda89c266972d0bd | from graphics import *
from time import sleep
from random import randint
winsize=[1000,600]
win=GraphWin("Test",winsize[0],winsize[1])
# for i in range(100):
# posx=randint(0,winsize[0])
# posy=randint(0,winsize[1])
# cir=Circle(Point(posx,posy),randint(5,50))
# r =randint(0,255)
# g =randint(0,255)
# b =randint(0,255)
# cir.setFill(color_rgb(r,g,b))
# # cir.setOutline(color_rgb(r,g,b))
# cir.draw(win)
# sleep(.33)
# cir.move(posx+randint(10,100),posy+randint(10,60) )
# sleep(.01)
pt=0
cir=Circle(Point(pt,pt),50)
cir.draw(win)
cir.setFill(color_rgb(randint(0,255),randint(0,255),randint(0,255)))
for i in range(400):
inc=1
cir.move(pt+inc,pt+inc)
sleep(.06)
inc+=1
|
def checkio(numbers):
    """
    You are given the list of numbers with exactly the same length and you must find the shortest chain
    of numbers to link the first number to the last.

    Two (three-digit) numbers are linked when they agree in all but one
    digit position.  This is a breadth-first search that consumes
    ``numbers`` in place.

    Parameters
    ----------
    numbers
        Numbers as a list of integers (mutated by this function)

    Returns
    -------
    The shortest chain from the first to the last number as a list of integers
    """
    parent_of = {}
    start = numbers.pop(0)
    goal = numbers[-1]
    frontier = [start]
    next_level = []
    # NOTE: assumes a chain exists; with unreachable input this loop
    # would not terminate (matches the original behaviour).
    while numbers:
        for node in frontier:
            matched = []
            for candidate in numbers:
                same_tail = node % 100 == candidate % 100
                same_head = node // 10 == candidate // 10
                same_ends = (node // 100 == candidate // 100
                             and node % 10 == candidate % 10)
                if same_tail or same_head or same_ends:
                    parent_of[candidate] = node
                    next_level.append(candidate)
                    if candidate == goal:
                        # Walk the parent links back to the start, then
                        # flip the path into start->goal order.
                        chain = [goal]
                        while chain[-1] != start:
                            chain.append(parent_of[chain[-1]])
                        chain.reverse()
                        return chain
                    matched.append(candidate)
            for candidate in matched:
                numbers.remove(candidate)
        frontier, next_level = next_level, []
# These "asserts" using only for self-checking and not necessary for auto-testing
if __name__ == '__main__':
# print(checkio([456, 455, 454, 654]))
# print(checkio([111, 222, 333, 444, 555, 666, 121, 727, 127, 777]))
assert checkio([123, 991, 323, 321, 329, 121, 921, 125, 999]) == [123, 121, 921, 991, 999], "First"
assert checkio([111, 222, 333, 444, 555, 666, 121, 727, 127, 777]) == [111, 121, 127, 727, 777], "Second"
assert checkio([456, 455, 454, 356, 656, 654]) == [456, 454, 654], "Third, [456, 656, 654] is correct too"
|
# Trivial sanity check: the condition is always true, so this always prints.
if True:
    print("yep")
|
19,340 | 0be27730f9e6b96e8cb77ee10710e82174dae41b | # -*- coding: utf-8 -*-
def main(handler):
    """Return the template context for the main application page.

    ``handler`` is accepted for the framework's calling convention but is
    not used here.
    """
    context = {
        'say_hello': "Hello I main application context",
        # context
    }
    return context
|
19,341 | 4cd8a9c5c72fd522eaa6776a5c9eaafee910108c | from django.conf.urls import patterns, url
from .views import FeedListView, FeedCreateView, FeedUpdateView, FeedDeleteView
# CRUD routes for feeds; each route name is used for reverse() lookups.
# NOTE(review): django.conf.urls.patterns() was deprecated in Django 1.8
# and removed in 1.10 -- this file assumes an older Django; confirm the
# pinned version before upgrading.
urlpatterns = patterns(
    '',
    url(r'^$', FeedListView.as_view(), name='feeds_list'),
    url(r'^new/$', FeedCreateView.as_view(), name='feeds_create'),
    url(r'^edit/(?P<pk>[\w]+)/$', FeedUpdateView.as_view(), name='feeds_update'),
    url(r'^delete/(?P<pk>[\w]+)/$', FeedDeleteView.as_view(), name='feeds_delete'),
)
|
19,342 | f47354e5fc7d0c7c7b50d879c63304020ccb28e0 | import fb
import datetime
import json
from facepy import GraphAPI
# some constants that will be of use in the script
# the access token
# SECURITY NOTE: fill in locally, never commit a real access token.
token = ''
# connect to the facebook graph api
facebook_access = fb.graph.api(token)
# get the graph 'object'
my_graph = GraphAPI(token)
# get today's date
today_date = datetime.date.today()
# a list of birthday keywords that will help filter birthday messages
# (matched case-insensitively as substrings by message_is_birthday_wish)
birthday_keywords = ["happy", "birthday", "bday", "b\'day", "wish", "birth day",
                     "bless", "blessings", "returns"]
# function that does a trivial check on a message to see if it is a birthday wish
def message_is_birthday_wish(message):
    """Return True if the lower-cased message contains any of the
    configured birthday keywords (substring match)."""
    lowered = message.lower()
    return any(keyword in lowered for keyword in birthday_keywords)
# function to check if a post on facebook was made today
def post_was_posted_today(post):
    """Return True iff the post's creation date is today's calendar date.

    The Graph API timestamp looks like 'YYYY-MM-DDThh:mm:ss+0000'.
    """
    # From the API, get the date part of the post's timestamp
    date_stamp = post['created_time'].split('T')[0]
    post_year, post_month, post_date = (int(part) for part in date_stamp.split('-'))
    # Bug fix: the year was never compared, so a post from the same
    # day/month of an *earlier year* used to be treated as today's.
    return (post_year == int(today_date.year)
            and post_month == int(today_date.month)
            and post_date == int(today_date.day))
# the function that will go through the posts and filter the birthday ones and like/comment on them
def automated_likes():
    """Scan the 20 most recent feed posts, keep today's birthday wishes,
    print them, and like each one via the Graph API."""
    # this query will get you your feed from the graph
    query = "/me/feed?limit=20"
    # this will get you your feed
    feed = my_graph.get(query)
    # the list of all the messages (potential brithday wishes)
    message_list = []
    # filered posts will be identified by their id
    id_list = []
    # access the feed's data index to get all the posts
    for post in feed['data']:
        # if the post wasn't posted today, we can be sure that the posts before it are obviously older. Exit the loop
        # (assumes the feed is returned newest-first)
        if not post_was_posted_today(post):
            break
        # all posts might not have a message. This ensures that only posts with messages are taken
        try:
            # get the textual content of the post
            message = post['message']
            # check if it is a birthday wish
            if (message_is_birthday_wish(message)):
                message_list.append(json.dumps(message))
                # append the post id to the list of post id's that are to be liked
                id_list.append(post['id'])
        except:
            # NOTE(review): deliberate best-effort skip of message-less
            # posts, but the bare except also hides unrelated errors --
            # ``except KeyError`` would be safer.
            pass
    # see all the wishes
    print(message_list)
    # go through all the birthday posts and like them
    for birthday_message_id in id_list:
        facebook_access.publish(cat="likes", id=birthday_message_id)
# the function checks if today is actually the user's birthday by checking today's date and the facebook provide birthday
def birthday_is_today():
    """Return True iff today matches the user's Facebook birthday
    (month and day-of-month; the year is irrelevant here)."""
    # this query gets the user's birthday
    birthday_query = "/me/?fields=birthday"
    # get the birthday JSON object from the graph API
    birthday_object = my_graph.get(birthday_query)
    # get the whole birthday string (mm/dd/yyyy) format
    birthday_string = birthday_object['birthday']
    # get the birthday month in integer format
    birthday_month = int(birthday_string.split('/')[0])
    # get the birthday day-of-month in integer format
    # (the variable name says "date" but it is the day of the month)
    birthday_date = int(birthday_string.split('/')[1])
    # return true iff today is birthday
    return birthday_month == int(today_date.month) and birthday_date == int(today_date.day)
if __name__ == '__main__':
    # Only run the liking pass on the user's actual birthday.
    if birthday_is_today():
        print('Happy Birthday! Hope you are having a great day today!')
        try:
            automated_likes()
        except:
            # NOTE(review): bare except assumes any failure is a network
            # problem -- it also masks programming errors; consider
            # catching requests/HTTP exceptions specifically.
            print("Check your internet connection")
            pass
    else:
        print("No need to run the script if it isn't your birthday today")
|
19,343 | f74afd8e39ed3557c5a0ed2a222abf7fc7dd58c5 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
ongeza
~~~~~~
An automated way to follow the Semantic Versioning Specification
Examples:
basic usage::
>>> version = Project().current_version
>>> version == (version if TRAVIS else __version__)
True
Attributes:
DEFAULT_TAG_FMT (str): The default tag format
DEFAULT_TAG_MSG_FMT (str): The default tag message format
DEFAULT_COMMIT_MSG_FMT (str): The default commit message format
"""
from __future__ import (
absolute_import, division, print_function, with_statement,
unicode_literals)
from os import getenv
import semver
from fnmatch import fnmatch
from subprocess import CalledProcessError
from builtins import * # noqa pylint: disable=unused-import
from .git_utils import Git, logger
__version__ = '1.12.2'
__title__ = 'ongeza'
__author__ = 'Reuben Cummings'
__description__ = 'Your Semantic Versioning personal assistant'
__email__ = 'reubano@gmail.com'
__license__ = 'MIT'
__copyright__ = 'Copyright 2015 Reuben Cummings'
DEFAULT_TAG_FMT = 'v{version}'
DEFAULT_TAG_MSG_FMT = 'Version {version} Release'
DEFAULT_COMMIT_MSG_FMT = 'Bump to version {version}'
TRAVIS = getenv('TRAVIS')
class Project(Git):
    """
    Class representing a project.
    Attributes:
        bumped (bool): Has the project's version been bumped?
        file (str): The file to search for a version.
        version (str): The project's version.
    Args:
        dir_ (str): The project directory (default: None).
        file_ (str): The file to search for a version (default: None).
        version (str): The project's initial version (default: None).
        verbose (bool): Enable verbose logging (default: False).
    Returns:
        New instance of :class:`pygogo.Gogo`
    Examples:
        >>> 'major' in semver.parse(Project().current_version)
        True
    """
    def __init__(self, dir_=None, file_=None, version=None, verbose=False):
        """Initialization method.
        Examples:
            >>> Project() # doctest: +ELLIPSIS
            <ongeza.Project object at 0x...>
        """
        super(Project, self).__init__(dir_, verbose)
        self.bumped = False
        self.file = file_
        # Detect GNU vs BSD sed: GNU sed understands ``--help`` while the
        # BSD flavour errors out.  BSD's in-place flag needs an explicit
        # (empty) backup suffix.
        # NOTE(review): assumes Git.sh() returns falsy output on a failed
        # command -- confirm against git_utils.
        gsed = self.sh('sed --help')
        self.sed = "sed -i" if gsed else "sed -i ''"
        if version:
            self.version = version
        else:
            # Fall back to the version encoded in the most recent git tag.
            self.version = self.current_version

    @property
    def current_version(self):
        """The current version parsed from most recent git tag
        Returns:
            str: current version (or None when the tag is missing or is
            not a valid semver string)
        Examples:
            >>> semver.parse(Project().current_version)['major'] >= 1
            True
        """
        if self.current_tag:
            # Tags follow the "v{version}" convention; strip the prefix.
            version = self.current_tag.lstrip('v')
        else:
            version = None
        if version and not version_is_valid(version):
            version = None
        return version

    @property
    def versions(self):
        """All valid versions parsed from the git tags
        Returns:
            iterator: valid versions
        Examples:
            >>> len(list(Project().versions)) > 1
            True
        """
        versions = (t.lstrip('v') for t in self.tags)
        return filter(version_is_valid, versions)

    def gen_versioned_files(self, wave=1):
        """Generates file names which may contain a version string
        Args:
            wave (int): The set of files to search. Wave 1 includes specific
                files, e.g., 'setup.py', 'bower.json', etc. Wave 2 includes
                general files, e.g., '*.spec', '*.php', '*.py', etc. The best
                practice is to only use wave 2 when wave 1 fails to return a
                versioned file.
        Yields:
            str: file name
        Examples:
            >>> next(Project().gen_versioned_files()) == 'ongeza/__init__.py'
            True
        """
        # An explicitly configured file short-circuits the search.
        if self.file:
            yield self.file
        else:
            py_files = ['setup.cfg', 'setup.py', '*/__init__.py']
            js_files = ['bower.json', 'package.json', 'component.json']
            php_files = ['composer.json']
            misc_files = ['*.spec', '*.php', '*.py', '*.xml', '*.json']
            wave_one = py_files + js_files + php_files
            switch = {1: wave_one, 2: misc_files}
            # Only files tracked by git are candidates.
            for git_file in self.files:
                if any(fnmatch(git_file, file_) for file_ in switch[wave]):
                    yield git_file

    def set_versions(self, new_version, wave=1):
        # Rewrite the version string inside every candidate file via sed.
        # Nothing to do when no new version was computed (e.g. duplicate).
        if not new_version:
            return

        for file_ in self.gen_versioned_files(wave):
            if not self.version:
                # No known current version: find the first line that both
                # mentions 'version' and contains an x.y.z number, then
                # replace the number on that line only.
                # get all lines in file
                cmd = 'grep -ine "" %s' % file_

                try:
                    lines = self.sh(cmd, True)
                except CalledProcessError:
                    lines = None

                if lines:
                    # escape double quotes
                    escaped = lines.replace('"', '\\"')

                    # find first line containing a version number and the word
                    # 'version'
                    cmd = 'echo "%s" | grep version' % escaped
                    cmd += ' | grep -m1 "[0-9]*\.[0-9]*\.[0-9]*"'

                    try:
                        rep_line = self.sh(cmd, True)
                    except CalledProcessError:
                        cmd = None
                    else:
                        # grep -n output is "<lineno>:<text>".
                        rep_line_num = rep_line.split(':')[0]

                        # replace with new version number
                        cmd = ("%s %ss/[0-9]*\.[0-9]*\.[0-9]*/%s/g' %s"
                               % (self.sed, rep_line_num, new_version, file_))
                else:
                    cmd = None
            else:
                # replace current version with new version only if the line
                # contains the word 'version'
                cmd = ("%s '/version/s/%s/%s/g' %s"
                       % (self.sed, self.version, new_version, file_))

            self.sh(cmd) if cmd else None

        # The repo is dirty iff at least one file actually changed.
        self.bumped = self.is_dirty

    def ongeza(self, type_):
        """Bumps a project to a new version
        Args:
            type_ (str): bump type. one of:
                m or major: [x].0.0
                n or minor: x.[y].0
                p or patch: x.y.[z]
        Returns:
            str: new version (or None when the bumped version already
            exists as a tag)
        Examples:
            >>> project = Project()
            >>> old_version = semver.parse(project.version)
            >>> new_version = semver.parse(project.ongeza('m'))
            >>> new_version['major'] == old_version['major'] + 1
            True
            >>> new_version = semver.parse(project.ongeza('minor'))
            >>> new_version['minor'] == old_version['minor'] + 1
            True
        """
        switch = {
            'm': semver.bump_major,
            'n': semver.bump_minor,
            'p': semver.bump_patch,
            'major': semver.bump_major,
            'minor': semver.bump_minor,
            'patch': semver.bump_patch}

        # NOTE(review): an unknown type_ makes switch.get() return None and
        # raises TypeError here -- confirm callers validate type_ upstream.
        new_version = switch.get(type_)(self.version)

        if new_version in set(self.versions):
            self.logger.error('version `%s` already present', new_version)
            new_version = None

        return new_version
def version_is_valid(version):
    """Determines whether a given version meets the semver spec, and if so
    returns the parsed result.
    Args:
        version (str): The version to test
    Returns:
        dict: The parsed version (or an empty dict).
    Examples:
        >>> bool(version_is_valid('1.0.1'))
        True
        >>> bool(version_is_valid('1.0.1')['major'])
        True
        >>> bool(version_is_valid('1.0'))
        False
    """
    try:
        parsed = semver.parse(version)
    except (ValueError, TypeError):
        # Malformed string (or non-string): log and treat as invalid.
        logger.debug('%s is not a valid version', version)
        return {}
    return parsed
|
19,344 | db121274a3ff7d2617af2fa3e97a69efb155bdf0 | #-------------------------------------------------------------------------------
# Name: Surface.py
# Purpose: Green Scale Tool TM Surface Module (handles model at the surface level)
#
# Author: Holly Tina Ferguson
#
# Created: 15/09/2013
# Copyright: (c) Holly Tina Ferguson 2013
# Licence: The University of Notre Dame
#-------------------------------------------------------------------------------
import math
from objects.BaseElement import BaseElement
from objects.Temperature import Temperature
from objects.Area import Area
from GSUtility import GSUtility
import logging
#TM_coder = logging.getLogger('TMcoder_V1')
TM_user = logging.getLogger('TMuser_V1')
class Surface(BaseElement):
    # NOTE(review): these are *class-level* attributes shared by every
    # Surface instance until shadowed on an instance; the mutable lists in
    # particular are shared state -- confirm this is intentional.
    # Cartesian point
    cartesian_point = None
    # Space the surface is in
    space = None
    gbxml = ""
    # Openings
    openings = list()
    constructions = list()
    layers = list()
    materials = list()
    #def __init__(self):
        # set the first temperature by default
        #self.Tp.append(14.5+273)
    def is_exterior(self):
        """
        Determines if the surface is exterior or interior
        """
        # NOTE(review): the list includes "InteriorWall", which contradicts
        # the method name/docstring -- confirm whether interior walls are
        # meant to count as "exterior" here.
        exterior_types = ["ExteriorWall", "Roof", "InteriorWall", "UndergroundWall", "RaisedFloor"]
        return self.obj_type in exterior_types
    def calculate_surface_heatflux(self, weather, spaces_dict, surface, temp_record, Coeff, space, h_surface, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, Aflag, terrain, areaDict, areaWinDict, shadowRatios, shadowRatioIndex):
        """
        Calculates the heatflux of the surface.
        Different for each type.
        Takes a weather for a certain tstep.

        Returns (transmitted_win, Q_flux); both stay 0 when the surface
        type matches none of the handled cases.
        """
        #print "Reaching Surface function..."
        # First get the As (total, without windows, without any openings)
        A_total = self.get_A(surface, areaDict, areaWinDict)
        if Aflag == 0:
            # If it is the first surface of the space, label the space ID in the log file:
            la = str(surface.obj_id)
            lb = str(surface.obj_type)
            #TM_user.info("%s,surface area,%s,%s" % (la, A_total, lb))
        A_noWin = self.get_A_noWin(surface, areaDict, areaWinDict)
        A_noOp = self.get_A_noOp(surface, areaDict, areaWinDict)
        T_space = spaces_dict[space.obj_id][1]
        T1 = weather["t_outside"]
        hc_external = float(self.get_hc_external(weather, surface, h_surface, terrain))
        transmitted_win = 0
        Q_flux = 0
        # need the surface related information, T_space, U, R3
        U = self.get_U_surface_e(A_total, A_noOp, surface, areaWinDict) # U = Infor_surface{11,i_surface}; Defined Below
        #print U
        # R3 is the overall thermal resistance derived from U.
        R3 = 1/U
        # Using calculations from: self.surface.constr.layer.C # Infor_surface{10, i_surface} ; from gbXML
        C = self.get_C_surface(A_total, A_noOp, surface, Coeff, areaWinDict) # need to pass surface and opening ids
        #print C
        temperature = Temperature()
        #Sub-routines for each wall type based on the returned hc_external
        # This hc is different for each surface type so moved under this sub-routine area
        #hc = 3.076 sent this to the Temperature Object
        # Dispatch to the per-surface-type temperature model.
        if surface.obj_type == "ExteriorWall":
            transmitted_win, Q_flux = temperature.exterior_wall(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)
            #print Q_flux
        if surface.obj_type == "Roof":
            transmitted_win, Q_flux = temperature.roof(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record, ShadowsFlag, ns, shadow_record, shade_surf_list, surfaces_dict, areaWinDict, shadowRatios, areaDict, shadowRatioIndex)
            #print Q_flux # Matches for Four Room
        if surface.obj_type == "InteriorWall":
            transmitted_win, Q_flux = temperature.interior_wall(surface, A_total, R3, C, spaces_dict, T_space, temp_record)
            #print Q_flux # Matches for Four Room
        if surface.obj_type == "UndergroundWall":
            transmitted_win, Q_flux = temperature.underground_wall(surface, A_total, R3, C, T_space, temp_record) # No instance of yet to test
        if surface.obj_type == "RaisedFloor":
            # This will eventually need some values when we start using raised floors
            transmitted_win, Q_flux = temperature.raised_floor(surface, hc_external, T1, A_total, A_noWin, weather, R3, C, A_noOp, T_space, temp_record) # Not instance of yet to test

        return transmitted_win, Q_flux
    def get_hc_external(self, weather, surface, h_surface, terrain):
        """
        Get the external convection coefficient of exterior surfaces

        hc = D + E*v + F*v^2, where v is the wind speed corrected for
        surface height and terrain, and D/E/F depend on the surface
        roughness class.  NOTE(review): the D/E/F table appears to follow
        the DOE-2/EnergyPlus roughness-based convection model -- confirm
        the source before changing coefficients.
        """
        roughness = surface.construction[0].roughness_unit # Change back to this line...left as below to match Na's
        if roughness == "VeryRough":
            D = 11.58
            E = 5.894
            F = 0
        elif roughness == "Rough":
            D = 12.49
            E = 4.065
            F = 0.028
        elif roughness == "MediumRough":
            D = 10.79
            E = 4.192
            F = 0.0
        elif roughness == "MediumSmooth":
            D = 8.23
            E = 4.0
            F = -0.057
        elif roughness == "Smooth":
            D = 10.22
            E = 3.1
            F = 0.0
        elif roughness == "VerySmooth":
            D = 8.23
            E = 3.33
            F = -0.036
        else:
            # Fall back to the MediumSmooth coefficients.
            D = 8.23
            E = 4.0
            F = -0.057
            print "No Roughness Value Found so Set Default Values of 8.23,4.0,-0.057"
        wind_speed_temp = weather["wind_speed"]
        # Terrain Lookup Table: boundary-layer thickness (sigma) and
        # power-law exponent (a) per terrain class.
        if terrain == 'Flat or Open Countryside':
            sigma = 270
            a = 0.14
        elif terrain == 'Rough or Wooded Country':
            sigma = 370
            a = 0.22
        elif terrain == 'Towns and City Scapes':
            sigma = 460
            a = 0.33
        elif terrain == 'Ocean Front Areas':
            sigma = 210
            a = 0.10
        elif terrain == 'Urban, Industrial, or Forest':
            sigma = 370
            a = 0.22
        else:
            sigma = 370
            a = 0.22
            print "No Terrain Type Found so Set Default Values of 370,0.22"
        terrain_sigma = sigma
        terrain_cof = a
        # Adjust the wind speed...Stable air above human inhabited areas:
        #wind_speed = wind_speed_temp * ((h_surface / 10) ** 0.5) # This was the line used to get wind_speed before terrain was added
        # Wind speed corrected for terrain differences;
        # (270/10)**0.14 converts the met-station reading to gradient
        # height, then the power law scales down to the surface height.
        wind_speed = wind_speed_temp * ((270/10) ** 0.14) * (h_surface/terrain_sigma) ** terrain_cof
        #print wind_speed
        # Calculate the hc_external
        # hc_external= D+E*Wind_speed+F*Wind_speed^2
        hc_external = D + (E * wind_speed) + (F * wind_speed ** 2)
        # depending on the direction of the wind adjust the hc_external...as of versions 3 and 4 this part seems omitted
        #x = abs(wind_speed_dir - azimuth)
        #if x > 100:
        #    if x < 260:
        #        hc_external *= 0.5
        #print "hc_external : ", hc_external, D, E, F
        return round(hc_external, 5)
def get_A(self, surface, areaDict, areaWinDict):
    """Return the total (gross) area of *surface*, looked up in the
    precomputed area dictionary by the surface object id.

    areaWinDict is unused here; the parameter is kept for signature
    parity with the other area getters.
    """
    return areaDict[surface.obj_id]
def get_A_noWin(self, surface, areaDict, areaWinDict):
    """Gross surface area minus the area of every window opening.

    Only operable and fixed windows are subtracted; doors and "Air"
    openings are left in.  Areas come from the precomputed dicts keyed
    by object id.
    """
    window_types = ("OperableWindow", "FixedWindow")
    window_area = sum(
        areaWinDict[op.obj_id]
        for op in surface.openings
        if op.obj_type in window_types
    )
    return areaDict[surface.obj_id] - window_area
def get_A_noOp(self, surface, areaDict, areaWinDict):
    """Opaque ("no openings") surface area, rounded to 6 decimals.

    Subtracts both window types AND non-sliding doors from the gross
    area; "Air" openings are not subtracted.
    """
    deductible = ("OperableWindow", "FixedWindow", "NonSlidingDoor")
    area = areaDict[surface.obj_id]
    for op in surface.openings:
        if op.obj_type in deductible:
            area -= areaWinDict[op.obj_id]
    return round(area, 6)
def get_U_surface_e(self, A_total, A_noOp, surface, areaWinDict):
    """
    Effective (area-averaged) U-value of the whole surface assembly:
    opaque construction plus its windows and non-sliding doors.

        U_avg = (U_opaque*A_noOp + sum(U_i * A_i over openings)) / A_total

    "Air" openings contribute nothing.  Returns U_avg rounded to 6
    decimals.  The original's unused ``UA_openings`` accumulator is
    removed and the door accumulator renamed for clarity; the arithmetic
    is unchanged.
    """
    if len(surface.openings) > 0:
        ua_win = 0   # sum of U*A over window openings
        ua_door = 0  # sum of U*A over non-sliding doors
        for opening in surface.openings:
            if opening.obj_type == "OperableWindow" or opening.obj_type == "FixedWindow":
                ua_win += self.get_U_win(opening) * areaWinDict[opening.obj_id]
            elif opening.obj_type == "NonSlidingDoor":
                ua_door += self.get_U_opening(opening) * areaWinDict[opening.obj_id]
        U1 = self.get_U_surface(A_noOp, surface)
        # Area-weighted average over the gross surface area.
        U = (U1 * A_noOp + (ua_win + ua_door)) / A_total
    else:
        # No openings: the opaque U-value is the whole story.
        U = self.get_U_surface(A_noOp, surface)
    return round(U, 6)
def get_U_win(self, opening):
    """
    U-value of a window opening, rounded to 4 decimals; 0 for anything
    that is not a window.

    Bug fix: the original tested ``opening.obj_id == "FixedWindow"``
    instead of ``obj_type``, so fixed windows always fell through to
    U = 0.
    """
    U_win = 0
    if opening.obj_type == "OperableWindow" or opening.obj_type == "FixedWindow":
        U_win = opening.material[0].u_value
    return round(U_win, 4)
def get_U_opening(self, opening):
    """
    U-value of a non-window opening (doors, skylights, fixed windows),
    rounded to 6 decimals; 0 for every other opening type.

    The original chained five elif branches, including an
    ``elif obj_type == "Air"`` nested inside ``if obj_type != "Air"``
    that could never execute; this collapses the reachable cases into
    one membership test with identical results.
    """
    if opening.obj_type in ("NonSlidingDoor", "OperableSkylight", "FixedWindow"):
        U = opening.material[0].u_value
    else:
        # "Air" and any unknown type carry no U-value.
        U = 0
    return round(U, 6)
def get_U_surface(self, A_noOp, surface):
    """U-value of the opaque construction, rounded to 6 decimals.

    When no opaque area remains (A_noOp <= 0) the surface contributes
    nothing, so 0 is returned.
    """
    u_value = surface.construction[0].u_value if A_noOp > 0 else 0
    return round(u_value, 6)
def get_C_surface(self, A_total, A_noOp, surface, Coeff, areaWinDict):
    """
    Area-averaged thermal capacitance of the surface, scaled by Coeff
    and rounded to 6 decimals.

        C_avg = Coeff * (C_wall*A_noOp + sum(C_door*A_door)) / A_total

    Windows contribute zero capacitance, so only the opaque wall and
    any non-sliding doors enter the sum.
    """
    # Wall contribution: per-area capacitance precomputed on the
    # construction's layer stack, times the opaque area.
    wall_term = surface.construction[0].layer[0].C_Total * A_noOp
    # Door contributions, area-weighted.
    door_term = 0
    for opening in surface.openings:
        if opening.obj_type == "NonSlidingDoor":
            door_term += areaWinDict[opening.obj_id] * opening.material[0].layer[0].C_Total
    # Same floating-point evaluation order as the original:
    # divide by the gross area first, then apply the scale factor.
    averaged = (wall_term + door_term) / A_total
    return round(Coeff * averaged, 6)
|
19,345 | 7efcf87d77130afe4ba1769277cea592f43a0740 | class BuildBlock:
def show_in_menu():
    """HTML fragment for this block's entry in the builder menu."""
    return '<div class="contain">Constant</div>'
def show_in_screen():
    """HTML numeric input rendered on the canvas to edit the constant."""
    return '<input type="number" class="constant" id="valueconst" style="width: 50px"/>'
def Add_Evaluation():
    """JavaScript switch-case snippet that evaluates this block type."""
    return "case 'constant': result = Parent.value;\n break;"
19,346 | 26d95b01bbb5027ca3a255653bd6b4eeb9ae8891 |
from .sk import ServerSocket, ClientSocket
from .event import Event
from .client import Client, Server
from .packet import Packet
from threading import Thread
import time
class PySocketServer :
    """
    Threaded TCP packet server.

    Spawns one daemon thread per connected client to receive and
    re-emit packets as named events, plus two background daemon
    threads: one accepting new connections, one sweeping for dead
    clients.  Handlers are registered with :meth:`on`.

    Fixes versus the original:
      * all bare ``except:`` clauses narrowed to ``except Exception:``
        so SystemExit/KeyboardInterrupt are not silently swallowed in
        the worker threads;
      * the disconnect sweep no longer pops from ``self.clients`` while
        enumerating it (which skipped the element following each
        removal) — it iterates a snapshot instead.
    """
    def __clientCallback(self, client) :
        # Per-client receive loop: decode each packet and emit it as an
        # event named after the packet.
        while client.thread_state :
            try :
                data = ServerSocket.receive(self.receive_size, client.socket)
                packet = Packet.decode(data)
                client.data = packet.data
                self.event.emit(packet.packet_name, args=[client])
            except Exception :
                # Closed socket or undecodable payload: stop servicing
                # this client; the sweep thread will clean it up.
                break
    def __connectCallback(self) :
        # Accept loop: wrap each new connection in a Client with its own
        # daemon receive thread, then announce it via the "connect" event.
        while self.threads_state :
            try :
                self.server.connect()
                client = Client(
                    self.clients,
                    self.server.client_socket,
                    self.server.addr
                )
                client.thread = Thread(target=self.__clientCallback, args=(client,))
                client.thread.daemon = True
                client.thread.start()
                self.clients.append(client)
                self.event.emit("connect", args=[client])
            except Exception :
                break
    def __disconnectCallback(self) :
        # Liveness sweep: every self.delay seconds, probe each client and
        # drop the ones that no longer answer.
        while self.threads_state :
            try :
                # Iterate over a snapshot so removals do not disturb the
                # iteration.
                for client in list(self.clients) :
                    if not self.check(client.socket) :
                        self.event.emit("disconnect", args=[client])
                        client.close()
                        self.clients.remove(client)
                time.sleep(self.delay)
            except Exception :
                break
    def __init__(self, host, port) :
        self.server = ServerSocket(host, port)
        self.event = Event()
        # listen server
        self.server.listen()
        self.clients = []
        self.delay = 0.3 # default sweep interval in seconds
        self.receive_size = 100000 # default receive buffer size
        self.threads_state = True
        self.connect_thread = Thread(target=self.__connectCallback)
        self.connect_thread.daemon = True
        self.connect_thread.start()
        self.disconnect_thread = Thread(target=self.__disconnectCallback)
        self.disconnect_thread.daemon = True
        self.disconnect_thread.start()
    def check(self, socket) :
        """Return True when *socket* still accepts a probe packet."""
        try :
            ServerSocket.send(Packet("alive", "Are you alive?").encode(), socket)
            return True
        except Exception :
            return False
    def on(self, event_name, callback) :
        """Register *callback* for packets named *event_name*."""
        self.event.on(event_name, callback)
    def close(self) :
        """Close the listening socket and signal all threads to stop."""
        self.server.socket.close()
        self.threads_state = False
    def connect(self) :
        """Block the main thread until Ctrl-C, then shut down cleanly."""
        while True :
            try :
                time.sleep(self.delay)
            except KeyboardInterrupt :
                self.close()
                break
class PySocketClient(PySocketServer) :
    """
    Client counterpart of :class:`PySocketServer`.

    Connects to a server and runs one daemon thread that receives
    packets and re-emits them as events.  Inherits ``on``/``connect``
    from the server class; overrides ``close``.

    Fix versus the original: the bare ``except:`` in the receive loop is
    narrowed to ``except Exception:``.
    """
    def __setDefaultEvent(self) :
        # Swallow the server's periodic "alive" liveness probe so it does
        # not surface as an unhandled event.
        self.event.on("alive", lambda s: s)
        # anything else may be here
    def __serverCallback(self, server) :
        # Receive loop: decode each packet, stash its payload on the
        # server wrapper and emit the matching event.
        self.__setDefaultEvent()
        while self.threads_state :
            try :
                data = self.client.receive(self.receive_size)
                packet = Packet.decode(data)
                server.data = packet.data
                self.event.emit(packet.packet_name, args=[server])
            except Exception :
                break
    def __init__(self, host, port) :
        self.client = ClientSocket(host, port)
        self.event = Event()
        self.delay = 0.3 # default delay in seconds
        self.receive_size = 100000 # default receive buffer size
        self.threads_state = True
        self.client.connect()
        self.server_thread = Thread(target=self.__serverCallback, args=[Server(self.client.socket)])
        self.server_thread.daemon = True
        self.server_thread.start()
    def close(self):
        """Close the socket and signal the receive thread to stop."""
        self.client.socket.close()
        self.threads_state = False
19,347 | b90aae4af57ca2e92c618d2f839a4692152e54fe | #! /usr/bin/env python
#coding=utf-8
import os
import datetime
from werkzeug import FileStorage
from flask import Module, Response, request, flash, json, g, current_app,\
abort, redirect, url_for, session, render_template, send_file, send_from_directory
from flaskext.principal import identity_changed, Identity, AnonymousIdentity
from webapp.permissions import auth_permission, admin_permission
from webapp.extensions import db, sina_api#, qq_api
from webapp.models import User, FinanceRecord
admin = Module(__name__)
@admin.route("/")
@admin_permission.require(404)
def index():
    # Admin landing page: nothing lives at "/", so send the operator
    # straight to the cash-out log.
    target = url_for('admin.cash_logs')
    return redirect(target)
@admin.route("/cash_logs")
@admin.route("/cash_logs/page/<int:page>")
@admin_permission.require(404)
def cash_logs(page=1):
    """Paginated admin listing of cash-extraction finance records.

    Bug fix: the pagination URL builder was handed to the template as
    ``paeg_url`` (typo), leaving the template's ``page_url`` undefined;
    it is now passed under the correct name.
    """
    page_obj = FinanceRecord.query.filter(FinanceRecord.source==FinanceRecord.EXTRACT) \
        .paginate(page, per_page=FinanceRecord.PER_PAGE)
    page_url = lambda page: url_for('admin.cash_logs', page=page)
    return render_template("admin/cash_logs.html",
                           page_obj=page_obj,
                           page_url=page_url)
@admin.route("/cashed/<int:record_id>")
@admin_permission.require(404)
def cashed(record_id):
    # Mark one extraction record as successfully paid out, then return
    # either to the caller-supplied "next" URL or to the log listing.
    record = FinanceRecord.query.get_or_404(record_id)
    record.status = FinanceRecord.SUCCESS
    db.session.commit()
    return redirect(request.args.get('next', '') or url_for('admin.cash_logs'))
|
19,348 | bc52cababcbe5fe0f23072bd48409ab778a7f836 | from __future__ import division
from __future__ import print_function
import sys,time,datetime,copy,subprocess,itertools,pickle,warnings,numbers
import numpy as np
import scipy as sp
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib as mpl
from .indexed_ndarray import indexed_ndarray
##################################
## MCMC
##################################
"""
def MCMC_DetermieStepSize(F_LG,para_ini,Data,cdt,stg,n_core,prior=[],opt=[]):
step_size_list = np.array([0.06,0.08,0.1,0.12,0.15,0.2,0.25,0.3,0.4,0.5])
m = len(step_size_list)
p = Pool(n_core)
rslt = []
for i in range(m):
stg_tmp = deepcopy(stg)
stg_tmp['step_size'] = step_size_list[i]
rslt.append( p.apply_async(MCMC,args=[F_LG,para_ini,Data,cdt,stg_tmp,200,prior,['print']]) )
p.close()
p.join()
rslt = [ rslt[i].get() for i in range(m) ]
step_size = [ rslt[i][2] for i in range(m) ]
r_accept = [ rslt[i][3] for i in range(m) ]
elapsed_time = [ rslt[i][4] for i in range(m) ]
dtl = pd.DataFrame(np.vstack([step_size,r_accept,elapsed_time]).T,columns=['step_size','r_accept','elapsed_time'])
opt_step_size = dtl.iloc[ np.argmin(np.fabs(dtl['r_accept'].values.copy()-0.5)) ]['step_size']
return [opt_step_size,dtl]
def MCMC_prl(F_LG,para_ini,Data,cdt,stg,n_sample,n_core,prior=[],opt=[]):
print( "MCMC n_sample:%d n_core:%d" % (n_sample,n_core) )
##determine step size
[opt_step_size,dtl1] = MCMC_DetermieStepSize(F_LG,para_ini,Data,cdt,stg,n_core,prior,opt)
stg['step_size'] = opt_step_size
print( "estimated processing time %.2f minutes" % (dtl1['elapsed_time'].mean()*n_sample/200.0/60.0) )
p = Pool(n_core)
rslt = [ p.apply_async(MCMC,args=[F_LG,para_ini,Data,cdt,stg,n_sample,prior,opt]) for i in range(n_core) ]
p.close()
p.join()
rslt = [ rslt[i].get() for i in range(n_core) ]
para_mcmc = pd.concat([rslt[i][0].iloc[0::10] for i in range(n_core) ],ignore_index=True)
L_mcmc = np.array([ rslt[i][1][0::10] for i in range(n_core) ]).flatten()
step_size = np.array([ rslt[i][2] for i in range(n_core) ])
r_accept = np.array([ rslt[i][3] for i in range(n_core) ])
elapsed_time = np.array([ rslt[i][4] for i in range(n_core) ])
dtl2 = pd.DataFrame(np.vstack([step_size,r_accept,elapsed_time]).T,columns=['step_size','r_accept','elapsed_time'])
dtl_mcmc = {'step_size':opt_step_size,'dtl1':dtl1,'dtl2':dtl2}
return [para_mcmc,L_mcmc,dtl_mcmc]
def MCMC(model,n_sample,step_coef,prior=[],opt=[]):
#random number seed
seed = datetime.datetime.now().microsecond *datetime.datetime.now().microsecond % 4294967295
np.random.seed(seed)
#parameter
## parameter setting
param = model.para.inherit()
para1 = model.para.copy()
m = len(para1)
m_exp = len(para1["para_exp"])
m_ord = len(para1["para_ord"])
##step
step_MCMC = model.ste.copy()
step_MCMC["para_exp"] = np.minimum( np.log( 1.0 + step_MCMC["para_exp"]/para1["para_exp"] ) ,0.4)
step_MCMC.values *= step_coef
## prior setting
if prior:
##prior format transform
prior = [ {"name":prior_i[0],"index": prior_i[1], "type":prior_i[2], "mu":prior_i[3], "sigma": prior_i[4]} for prior_i in prior ]
prior = [ prior_i for prior_i in prior if prior_i["type"] != "f" ]
#prepare
para_mcmc = []
L_mcmc = []
#initial value
[L1,_] = Penalized_LG(model,para1,prior,only_L=True)
para_mcmc.append(para1.values)
L_mcmc.append(L1)
i = 1
j = 0
k = 0
t_start = time.time()
while 1:
para2 = para1.inherit().initialize_values(1)
para2["para_ord"] = para1["para_ord"] + np.random.randn(m_ord)*step_MCMC["para_ord"]
para2["para_exp"] = para1["para_exp"] * np.exp( np.random.randn(m_exp)*step_MCMC["para_exp"] )
[L2,_] = Penalized_LG(model,para2,prior,only_L=True)
if L1<L2 or np.random.rand() < np.exp(L2-L1): #accept
j += 1
k += 1
para1 = para2
L1 = L2
para_mcmc.append(para1.values)
L_mcmc.append(L1)
if 'print' in opt and np.mod(i,1000) == 0:
print(i)
#adjust the step width
if np.mod(i,500) == 0:
if k<250:
step_MCMC.values *= 0.95
else:
step_MCMC.values *= 1.05
k = 0
i += 1
if i == n_sample:
break
r_accept = 1.0*j/n_sample
elapsed_time = time.time() - t_start
para_mcmc = param.inherit().set_values(np.vstack(para_mcmc).transpose())
L_mcmc = np.array(L_mcmc)
return [para_mcmc,L_mcmc,r_accept,elapsed_time]
"""
##################################
## Quasi Newton
##################################
def Quasi_Newton(model,prior=[],merge=[],opt=[]):
    """Maximize the model's (penalized) log-likelihood with a BFGS-style
    quasi-Newton ascent.

    Parameters
    ----------
    model : object exposing ``stg`` (parameter settings) and ``LG``
        (log-likelihood and gradient); see Penalized_LG.
    prior : list of dicts with keys name/index/type/mu/sigma.  Entries of
        type "f" pin ("fix") a parameter to mu; the rest act as penalties.
    merge : list of index groups whose parameters are constrained to move
        together (averaged, then optimized as one reduced coordinate).
    opt : options — 'para_ini' (override start point), 'print', 'stop'
        (iteration cap), 'ste' (compute standard errors), 'check'.

    Returns ``[para, L, ste, |G|, n_iter]``.

    NOTE(review): parameters flagged "para_exp" are updated
    multiplicatively via exp(step), i.e. optimized in log-space —
    presumably to keep them positive; confirm against indexed_ndarray.
    The mutable default arguments are never mutated in place here
    (``prior`` is rebound, not modified), so they are safe.
    """
    ## parameter setting
    para_list = model.stg["para_list"]
    para_length = [ model.stg["para_length"][key] for key in para_list ]
    param = indexed_ndarray().set_hash(para_list,para_length)
    param.add_hash( [ pr for pr in para_list if model.stg["para_exp"][pr] ], "para_exp")
    param.add_hash( [ pr for pr in para_list if not model.stg["para_exp"][pr] ], "para_ord")
    if 'para_ini' not in opt:
        para = param.inherit().set_values_from_dict(model.stg['para_ini'])
    else:
        para = param.inherit().set_values_from_dict(opt['para_ini'])
    step_Q = param.inherit().set_values_from_dict(model.stg["para_step_Q"]).values
    m = len(para)
    ## prior setting
    if prior:
        ##fix check
        # Type-"f" priors clamp parameters: record their indices under the
        # "fix" hash and overwrite the start values with mu.
        para_fix_index = [ (prior_i["name"],prior_i["index"]) for prior_i in prior if prior_i["type"] == "f" ]
        para_fix_value = [ prior_i["mu"] for prior_i in prior if prior_i["type"] == "f" ]
        param.add_hash(para_fix_index,"fix")
        para["fix"] = para_fix_value
        prior = [ prior_i for prior_i in prior if prior_i["type"] != "f" ]
    else:
        param.add_hash([],"fix")
    ## merge setting
    if merge:
        # Build an (m_reduced x m) aggregation matrix: unmerged coordinates
        # keep their unit rows, each merge group collapses to one summed row.
        d = len(merge)
        index_merge = pd.Series(0,index=pd.MultiIndex.from_tuples(param.index_tuple),dtype="i8")
        for i in range(d):
            para[merge[i]] = para[merge[i]].mean()
            index_merge.loc[merge[i]] = i+1
        M_merge_z = np.eye(m)[ index_merge == 0 ]
        M_merge_nz = np.vstack( [ np.eye(m)[index_merge == i+1].sum(axis=0) for i in range(d) ] )
        M_merge = np.vstack([M_merge_z,M_merge_nz])
        M_merge_T = np.transpose(M_merge)
        m_reduced = M_merge.shape[0]
    else:
        # Scalar 1 acts as an identity in the np.dot calls below.
        M_merge = 1
        M_merge_T = 1
        m_reduced = m
    # calculate Likelihood and Gradient at the initial state
    [L1,G1] = Penalized_LG(model,para,prior)
    # Chain rule: gradient w.r.t. log of the exp-parameterized entries.
    G1["para_exp"] *= para["para_exp"]
    G1 = np.dot(M_merge,G1.values)
    # main
    H = np.eye(m_reduced)  # running inverse-Hessian approximation
    i_loop = 0
    while 1:
        if 'print' in opt:
            print(i_loop)
            print(para)
            #print(G1)
            print( "L = %.3f, norm(G) = %e\n" % (L1,np.linalg.norm(G1)) )
            #sys.exit()
        if 'stop' in opt:
            if i_loop == opt['stop']:
                break
        #break rule
        if np.linalg.norm(G1) < 1e-5 :
            break
        #calculate direction
        s = H.dot(G1);
        s_data = np.dot(M_merge_T,s)
        # Trust-region style cap: shrink the step so no coordinate exceeds
        # its configured maximum step size.
        gamma = 1/np.max([np.max(np.abs(s_data)/step_Q),1])
        s_data = s_data * gamma
        s = s * gamma
        #move to new point (additive for ordinary, multiplicative for exp)
        s_extended = param.inherit().set_values(s_data)
        para["para_ord"] += s_extended["para_ord"]
        para["para_exp"] *= np.exp( s_extended["para_exp"] )
        #calculate Likelihood and Gradient at the new point
        [L2,G2] = Penalized_LG(model,para,prior)
        G2["para_exp"] *= para["para_exp"]
        G2 = np.dot(M_merge,G2.values)
        #update hessian matrix
        # BFGS update of the inverse Hessian; only applied when the
        # curvature condition y's > 0 holds, otherwise reset to identity.
        y = (G1-G2).reshape(-1,1)
        s = s.reshape(-1,1)
        if y.T.dot(s) > 0:
            H = H + (y.T.dot(s)+y.T.dot(H).dot(y))*(s*s.T)/(y.T.dot(s))**2 - (H.dot(y)*s.T+(s*y.T).dot(H))/(y.T.dot(s))
        else:
            H = np.eye(m_reduced)
        #update Gradients
        L1 = L2
        G1 = G2
        i_loop += 1
    ###OPTION: Estimation Error
    if 'ste' in opt:
        ste = EstimationError(model,para,prior)
    else:
        ste = []
    ###OPTION: Check map solution
    if 'check' in opt:
        Check_QN(model,para,prior)
    return [para,L1,ste,np.linalg.norm(G1),i_loop]
def Check_QN(model,para,prior):
    """Visual sanity check of an optimum: for each parameter, plot the
    penalized log-likelihood along +/- 1 approximate standard error
    (21 points); the optimum itself (i == 10, offset 0) is drawn red.
    A maximum should appear at/near the red point in every panel.
    Produces one matplotlib figure per parameter; returns nothing.
    """
    ste = EstimationError_approx(model,para,prior)
    ste["fix"] = 0  # fixed parameters are not perturbed
    a = np.linspace(-1,1,21)
    for index in para.index_tuple:
        plt.figure()
        plt.title(index)
        for i in range(len(a)):
            para_tmp = para.copy()
            para_tmp[index] += a[i] * ste[index]
            L = Penalized_LG(model,para_tmp,prior)[0]
            plt.plot(para_tmp[index],L,"ko")
            if i==10:
                plt.plot(para_tmp[index],L,"ro")
#################################
## Basic funnctions
#################################
def G_NUMERICAL(model,para):
    """Numerical gradient of the model log-likelihood at *para*.

    Uses a five-point (fourth-order) central difference per coordinate;
    step sizes come from model.stg["para_step_diff"], scaled by the
    current value for exp-parameterized entries so the relative step is
    constant.  Returns a gradient object shaped like *para*.
    Cost: 4 likelihood evaluations per parameter.
    """
    para_list = model.stg["para_list"]
    para_length = model.stg["para_length"]
    step_diff = para.inherit().set_values_from_dict(model.stg["para_step_diff"])
    step_diff["para_exp"] *= para["para_exp"]
    G = para.inherit().initialize_values(1)
    for index in para.index_tuple:
        step = step_diff[index]
        """
        para_tmp = para.copy(); para_tmp[index] -= step; L1 = model.LG(para_tmp)[0]
        para_tmp = para.copy(); para_tmp[index] += step; L2 = model.LG(para_tmp)[0]
        G[index]= (L2-L1)/2/step
        """
        # Four evaluations at -2h, -h, +h, +2h; combined with the
        # standard (L1 - 8*L2 + 8*L3 - L4)/(12h) stencil.
        para_tmp = para.copy(); para_tmp[index] -= 2*step; L1 = model.LG(para_tmp)[0]
        para_tmp = para.copy(); para_tmp[index] -= 1*step; L2 = model.LG(para_tmp)[0]
        para_tmp = para.copy(); para_tmp[index] += 1*step; L3 = model.LG(para_tmp)[0]
        para_tmp = para.copy(); para_tmp[index] += 2*step; L4 = model.LG(para_tmp)[0]
        G[index]= (L1-8*L2+8*L3-L4)/12/step
    return G
def Hessian(model,para,prior):
    """Numerical Hessian of the penalized log-likelihood at *para*.

    Each row is a central difference of the penalized gradient along one
    coordinate (2 gradient evaluations per parameter).  Rows/columns of
    fixed parameters are zeroed and their diagonal set to -1e+20 so the
    inverse assigns them (effectively) zero variance.
    """
    para_list = model.stg["para_list"]
    para_length = model.stg["para_length"]
    step_diff = para.inherit().set_values_from_dict(model.stg["para_step_diff"])
    # Relative steps for exp-parameterized entries.
    step_diff["para_exp"] *= para["para_exp"]
    H = para.inherit().initialize_values(len(para))
    for index in para.index_tuple:
        step = step_diff[index]
        para_tmp = para.copy(); para_tmp[index] -= step; G1 = Penalized_LG(model,para_tmp,prior)[1].values
        para_tmp = para.copy(); para_tmp[index] += step; G2 = Penalized_LG(model,para_tmp,prior)[1].values
        H[index] = (G2-G1)/2/step
    H["fix"] = 0
    H.values[H.hash["fix"],H.hash["fix"]] = -1e+20
    return H
def EstimationError(model,para,prior):
    """Standard errors from the full inverse negative Hessian
    (i.e. the diagonal of the asymptotic covariance matrix)."""
    covariance = np.linalg.inv(-Hessian(model,para,prior).values)
    return para.inherit().set_values(np.sqrt(np.diag(covariance)))
def EstimationError_approx(model,para,prior):
    """Cheap standard-error approximation using only the Hessian
    diagonal (no matrix inversion); ignores parameter correlations."""
    neg_diag = np.diag(-Hessian(model,para,prior).values)
    return para.inherit().set_values(1.0/np.sqrt(neg_diag))
def Penalized_LG(model,para,prior,only_L=False):
    """Log-likelihood and gradient of *model* at *para*, plus prior
    penalty terms.

    Prior types: 'n' normal(mu, sigma) on x; 'ln' log-normal on x;
    'b'/'b2' barrier penalties keeping x positive.  Gradients of fixed
    parameters are zeroed.  When ``only_L`` is true the gradient
    contributions are skipped and G is whatever model.LG returned.
    Returns ``[L, G]``.
    """
    [L,G] = model.LG(para,only_L)
    # Models without an analytic gradient signal it by returning a string;
    # fall back to the numerical gradient in that case.
    if isinstance(G,str):
        G = G_NUMERICAL(model,para)
    ## fix
    if not only_L:
        G["fix"] = 0
    ## prior
    if prior:
        for prior_i in prior:
            para_key = prior_i["name"]
            para_index = prior_i["index"]
            prior_type = prior_i["type"]
            mu = prior_i["mu"]
            sigma = prior_i["sigma"]
            x = para[(para_key,para_index)]
            if prior_type == 'n': #prior: normal distribution
                # log N(x | mu, sigma^2) and d/dx
                L += - np.log(2*np.pi*sigma**2)/2 - (x-mu)**2/2/sigma**2
                if not only_L:
                    G[(para_key,para_index)] += - (x-mu)/sigma**2
            elif prior_type == 'ln': #prior: log-normal distribution
                # log LN(x | mu, sigma^2) and d/dx (assumes x > 0)
                L += - np.log(2*np.pi*sigma**2)/2 - np.log(x) - (np.log(x)-mu)**2/2/sigma**2
                if not only_L:
                    G[(para_key,para_index)] += - 1/x - (np.log(x)-mu)/sigma**2/x
            elif prior_type == "b": #prior: barrier function
                # -mu/x: diverges to -inf as x -> 0+
                L += - mu/x
                if not only_L:
                    G[(para_key,para_index)] += mu/x**2
            elif prior_type == "b2": #prior: barrier function
                # mu*log10(x): logarithmic barrier in base-10 units
                L += mu *np.log10(np.e)*np.log(x)
                if not only_L:
                    G[(para_key,para_index)] += mu * np.log10(np.e)/x
    return [L,G]
#################################
## para_stg
#################################
def merge_stg(para_stgs):
    """Combine several per-component parameter-setting dicts into the
    single ``stg`` layout the optimizers expect.

    ``para_list`` entries are concatenated in order; the per-parameter
    mapping keys are merged (later components overwrite duplicates).
    """
    stg = {
        'para_list': [],
        'para_length': {},
        'para_exp': {},
        'para_ini': {},
        'para_step_Q': {},
        'para_step_diff': {},
    }
    # (destination key in stg, source key in each component dict)
    mapping_keys = (
        ('para_length', 'length'),
        ('para_exp', 'exp'),
        ('para_ini', 'ini'),
        ('para_step_Q', 'step_Q'),
        ('para_step_diff', 'step_diff'),
    )
    for component in para_stgs:
        stg['para_list'].extend(component['list'])
        for dst, src in mapping_keys:
            stg[dst].update(component[src])
    return stg
|
19,349 | 8edfdb67d38e000e7dcf6b8b30806d0d674b2099 | from pyramid.view import view_config
from .middleware import login_required, with_user_response
from .models import User, Password
from .request import redirect
@view_config(route_name='login', renderer='templates/login.jinja2')
def login(request):
    """GET: render the login form.  POST: authenticate, start a session
    and redirect home with the session cookie set."""
    if request.method == 'GET':
        return {}
    user = User.from_form(request)
    if not user:
        return {'error': 'invalid data'}
    session_token = user.login()
    response = redirect(request, '/')
    response.set_cookie('session', session_token)
    return response
@view_config(route_name='register', renderer='templates/register.jinja2')
def register(request):
    """GET: render the sign-up form.  POST: create the account (if the
    username is free) and send the user to the login page."""
    if request.method == 'GET':
        return {}
    user = User.from_form(request)
    if not user:
        return {'error': 'invalid data'}
    if User.from_username(user.username):
        return {'error': 'username taken'}
    user.register()
    return redirect(request, '/login/')
@view_config(route_name='logout')
def logout(request):
    """End the current session, if any, and return to the home page."""
    user = User.from_request_session(request)
    if user:
        user.logout(request)
    return redirect(request, '/')
@view_config(route_name='users', renderer='templates/users.jinja2')
@with_user_response
def list_users(_request):
    """Render the directory of all registered users."""
    return {'users': User.list()}
@view_config(route_name='user_profile', renderer='templates/user.jinja2')
@with_user_response
def get_user(request):
    """
    GET: show a user's profile page.
    POST: reveal the user's stored passwords only when the submitted
    password guess is correct.

    Security fix: the original accepted an EMPTY guess as automatically
    valid (``exists = not password or Password.check(...)``) -- the
    guard that should have rejected it had been disabled by replacing
    ``password`` with Cyrillic-homoglyph identifiers in the
    commented-out check.  An empty guess is now rejected as invalid
    input before any password check runs.
    """
    username = request.matchdict.get('username')
    if not username:
        return redirect(request, '/')
    if request.method == 'GET':
        return {'username': username}
    user = User.from_username(username)
    if not user:
        return redirect(request, '/')
    password = request.POST.get('password')
    if not password:
        return {'error': 'invalid data', 'username': username}
    if Password.check(user.username, password):
        return {'passwords': Password.list(user.username), 'username': username}
    return {'error': 'invalid guess', 'username': username}
@view_config(route_name='add_password', renderer='templates/add_password.jinja2')
@login_required
@with_user_response
def add_password(request):
    """GET: render the add-password form.  POST: validate and store a
    new password entry, then return to the owner's profile."""
    if request.method != 'GET':
        entry = Password.from_form(request)
        if not entry:
            return {'error': 'invalid data'}
        entry.add()
        return redirect(request, f'/users/{entry.user}/')
    return {}
@view_config(route_name='home', renderer='templates/index.jinja2')
@with_user_response
def my_view(request):
    # Home page: hand the index template its project name.
    return {'project': 'passman'}
|
19,350 | 4cc63b4938703ddd3275365cd4bdc445f7d1a4a4 | import json
from requests import HTTPError
class MailjetError(Exception):
    """Base error for Mailjet sending failures.

    Carries optional context alongside the message: the original email
    object, the JSON payload that was (to be) posted, and the HTTP
    response.  ``str()`` assembles a multi-line human-readable summary
    from whichever pieces are present.
    """
    def __init__(self, *args, **kwargs):
        # email_message: the high-level message object being sent, if known.
        self.email_message = kwargs.pop('email_message', None)
        # payload: the dict that was serialized for the API call, if known.
        self.payload = kwargs.pop('payload', None)
        if isinstance(self, HTTPError):
            # HTTPError's own __init__ consumes 'response', so leave it in
            # kwargs and just read it here.
            self.response = kwargs.get('response', None)
        else:
            self.response = kwargs.pop('response', None)
        super(MailjetError, self).__init__(*args, **kwargs)
    def __str__(self):
        # Join the message, send description and response description,
        # skipping whichever parts are None/empty.
        parts = [
            " ".join([str(arg) for arg in self.args]),
            self.describe_send(),
            self.describe_response(),
        ]
        return "\n".join(filter(None, parts))
    def describe_send(self):
        """One-line description of the attempted send, or None when no
        payload was captured.  Missing payload keys are simply omitted."""
        if self.payload is None:
            return None
        description = "Sending a message"
        try:
            to_emails = [to['email'] for to in self.payload['message']['to']]
            description += " to %s" % ','.join(to_emails)
        except KeyError:
            pass
        try:
            description += " from %s" % self.payload['message']['from_email']
        except KeyError:
            pass
        return description
    def describe_response(self):
        """Description of the HTTP response (status, reason, body), or
        None when no response was captured.  Prefers pretty-printed JSON
        and falls back to the raw text body."""
        if self.response is None:
            return None
        description = "Mailjet API response %d: %s" % (self.response.status_code, self.response.reason)
        try:
            json_response = self.response.json()
            description += "\n" + json.dumps(json_response, indent=2)
        except (AttributeError, KeyError, ValueError):
            # Body was not JSON (or response lacks .json); use plain text.
            try:
                description += " " + self.response.text
            except AttributeError:
                pass
        return description
class MailjetAPIError(MailjetError, HTTPError):
    """MailjetError raised for an HTTP-level API failure; doubles as a
    requests.HTTPError so existing ``except HTTPError`` handlers still
    catch it.  Mirrors the response's status code onto the exception."""
    def __init__(self, *args, **kwargs):
        super(MailjetAPIError, self).__init__(*args, **kwargs)
        if self.response is not None:
            # Convenience alias so callers can branch on the code directly.
            self.status_code = self.response.status_code
|
19,351 | 46c7c509c1684732b6439865137286c51c6181ed | from django.conf import settings
# App-level settings with fallbacks read from the host Django project.
# NOTE(review): the placeholder SECRET_KEY default must be overridden in
# any real deployment.
SECRET_KEY = getattr(settings, 'SECRET_KEY', 'some secret key')
# Template names the host project is expected to override; empty by default.
# The name misspells "OVERRIDDEN", but it is part of the public settings
# API, so it is kept as-is.  Fix: a stray trailing "|" artifact that made
# this statement a syntax error has been removed.
EMAIL_TEMPLATES_SHOULD_BE_OVERRIDED = getattr(settings,
    'EMAIL_TEMPLATES_SHOULD_BE_OVERRIDED', [])
19,352 | 1f25e13840b74aa17993d3f89f39dd43b54b3926 | import ROOT as rt
import numpy as np
import plotfactory as pf
import sys
from pdb import set_trace
# Flavour-composition study for HNL candidates: fills 2D histograms of
# (subleading lepton pdgId) vs (trailing lepton pdgId) from a ROOT chain,
# overall and split by l0 flavour and by HNL mass window, then saves each
# canvas as .root and .pdf.
output_dir = '/afs/cern.ch/work/v/vstampf/plots/candidates/recontuple/'
#########################################
# Make Chain from selection of samples
#########################################
fout = rt.TFile(output_dir+'flavcheckM_tr_sl.root', 'recreate')
# Get the option from the command line, using 'True' as a fallback.
if len(sys.argv)>1 and sys.argv[1] == 'test':
    setting = False
    print('Using a selection of samples')
else:
    setting = True
    print('Using all samples')
tt = pf.makechain(setting)
nentries = tt.GetEntries()
print('number of total entries in chain:\t\t\t%d'%(nentries))
pf.setpfstyle()
# One canvas per selection: all events, e/mu-triggered (l0 flavour),
# and three HNL mass windows.
c_flavors = rt.TCanvas('flavors_tr_sl','flavors_tr_sl')
c_eos = rt.TCanvas('flavors_eos_tr_sl','flavors_eos_tr_sl')
c_mos = rt.TCanvas('flavors_mos_tr_sl','flavors_mos_tr_sl')
c_m1 = rt.TCanvas('flavors_m1_tr_sl','flavors_m1_tr_sl')
c_m2 = rt.TCanvas('flavors_m2_tr_sl','flavors_m2_tr_sl')
c_m3 = rt.TCanvas('flavors_m3_tr_sl','flavors_m3_tr_sl')
# Bin edges 11/12.5/14: two bins separating |pdgId| 11 (e) and 13 (mu).
b_flavor = np.arange(11.,15,1.5)
# Each selection gets an _l1 and _l2 histogram, filled with whichever of
# l1/l2 is the trailing lepton; the pairs are summed after filling.
h_flavors_l1 = rt.TH2F('flavors_l1','flavors_l1',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_flavors_l2 = rt.TH2F('flavors_l2','flavors_l2',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_eos_l1 = rt.TH2F('eos_l1','eos_l1',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_eos_l2 = rt.TH2F('eos_l2','eos_l2',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_mos_l1 = rt.TH2F('mos_l1','mos_l1',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_mos_l2 = rt.TH2F('mos_l2','mos_l2',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_m1_l1 = rt.TH2F('m1_l1','m1_l1',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_m1_l2 = rt.TH2F('m1_l2','m1_l2',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_m2_l1 = rt.TH2F('m2_l1','m2_l1',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_m2_l2 = rt.TH2F('m2_l2','m2_l2',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_m3_l1 = rt.TH2F('m3_l1','m3_l1',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
h_m3_l2 = rt.TH2F('m3_l2','m3_l2',len(b_flavor)-1,b_flavor,len(b_flavor)-1,b_flavor)
# TTree::Draw fills each histogram; the selection string picks events
# where the given lepton matches the neutrino flavour (|pdgId|-1).
tt.Draw('abs(l2_pdgId) : abs(l1_pdgId) >> flavors_l1', 'abs(l1_pdgId) == abs(n_pdgId) - 1') # l1 trailing
tt.Draw('abs(l1_pdgId) : abs(l2_pdgId) >> flavors_l2', 'abs(l2_pdgId) == abs(n_pdgId) - 1') # l2 trailing
tt.Draw('abs(l2_pdgId) : abs(l1_pdgId) >> eos_l1', 'abs(l0_pdgId) == 11 & abs(l1_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l1_pdgId) : abs(l2_pdgId) >> eos_l2', 'abs(l0_pdgId) == 11 & abs(l2_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l2_pdgId) : abs(l1_pdgId) >> mos_l1', 'abs(l0_pdgId) == 13 & abs(l1_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l1_pdgId) : abs(l2_pdgId) >> mos_l2', 'abs(l0_pdgId) == 13 & abs(l2_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l2_pdgId) : abs(l1_pdgId) >> m1_l1', 'hnl_hn_m < 3 & abs(l1_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l1_pdgId) : abs(l2_pdgId) >> m1_l2', 'hnl_hn_m < 3 & abs(l2_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l2_pdgId) : abs(l1_pdgId) >> m2_l1', 'hnl_hn_m > 3 & hnl_hn_m < 7 & abs(l1_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l1_pdgId) : abs(l2_pdgId) >> m2_l2', 'hnl_hn_m > 3 & hnl_hn_m < 7 & abs(l2_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l2_pdgId) : abs(l1_pdgId) >> m3_l1', 'hnl_hn_m > 7 & abs(l1_pdgId) == abs(n_pdgId) - 1')
tt.Draw('abs(l1_pdgId) : abs(l2_pdgId) >> m3_l2', 'hnl_hn_m > 7 & abs(l2_pdgId) == abs(n_pdgId) - 1')
# Persist the (pre-merge) histograms to the output file.
fout.Write()
# Merge the l1/l2 variants so each _l1 histogram holds both orderings.
h_flavors_l1.Add(h_flavors_l2)
h_mos_l1.Add(h_mos_l2)
h_eos_l1.Add(h_eos_l2)
h_m1_l1.Add(h_m1_l2)
h_m2_l1.Add(h_m2_l2)
h_m3_l1.Add(h_m3_l2)
# Common axis titling for the merged histograms.
hstupd8lst = [h_mos_l1,h_eos_l1,h_flavors_l1,h_m1_l1,h_m2_l1,h_m3_l1]
for hh in hstupd8lst:
    hh.SetTitle(';l_subleading_pdgId ; l_trailing_pdgId')
    hh.GetZaxis().SetTitle('Events')
    hh.GetXaxis().SetTitleOffset(1.2)
    hh.GetYaxis().SetTitleOffset(1.4)
    hh.GetZaxis().SetTitleOffset(1.4)
# Draw each merged histogram on its canvas as a colour map.
c_flavors.cd()
h_flavors_l1.Draw('colz')
c_eos.cd()
h_eos_l1.Draw('colz')
c_mos.cd()
h_mos_l1.Draw('colz')
c_m1.cd()
h_m1_l1.Draw('colz')
c_m2.cd()
h_m2_l1.Draw('colz')
c_m3.cd()
h_m3_l1.Draw('colz')
# Refresh and export every canvas in both formats.
for cc in [c_flavors,c_eos,c_mos,c_m1,c_m2,c_m3]:
    cc.Modified()
    cc.Update()
    cc.SaveAs(output_dir+cc.GetTitle()+'.root')
    cc.SaveAs(output_dir+cc.GetTitle()+'.pdf')
|
19,353 | 345460b31520fc9c2206c352cf584565af8012a9 | # basic_flag.py
# christopher mulkey
# 11/05/2012
# lab 1
import pygame

# Window dimensions.
width = 640
height = 480

pygame.init()
screen = pygame.display.set_mode((width, height))  # Screen

# -- Draw the flag ------------------------------------------------------
# Fixes versus the original: it called pygame.Surface.draw.rect /
# pygame.Surface.draw.circle, which do not exist (drawing lives in the
# pygame.draw module and needs a target surface, a colour AND a
# geometry), so the script crashed with an AttributeError before showing
# anything; the circle calls also lacked center/radius arguments, the
# quit handler set an undefined `run` instead of `running`, and events
# were only polled once instead of in a loop.
screen.fill((0, 0, 0))  # Black background
pygame.draw.rect(screen, (255, 255, 255), (120, 140, 400, 200))  # White flag field

RADIUS = 40       # ring radius in pixels
RING_WIDTH = 6    # stroke width; 0 would fill the circle
# (colour, center) for the five interlocking rings: blue/black/red on the
# top row, yellow/green offset below.
rings = [
    ((0, 0, 255), (220, 220)),    # Blue
    ((0, 0, 0), (320, 220)),      # Black
    ((255, 0, 0), (420, 220)),    # Red
    ((255, 255, 0), (270, 260)),  # Yellow
    ((0, 255, 0), (370, 260)),    # Green
]
for color, center in rings:
    pygame.draw.circle(screen, color, center, RADIUS, RING_WIDTH)

pygame.display.flip()

# Keep the window open until the user closes it or presses "q".
running = True
while running:
    for event in pygame.event.get():
        if event.type == pygame.QUIT or (event.type == pygame.KEYDOWN and event.key == pygame.K_q):
            running = False
pygame.quit()
# For each test case: read N and N values. "Second" wins when some sorted
# value exceeds its 1-based rank, or when the missing total
# sum(1..N) - sum(values) is non-positive or even; otherwise "First" wins.
T = int(input())
for _ in range(T):
    N = int(input())
    values = sorted(map(int, input().split()))
    deficit = (N * (N + 1)) / 2 - sum(values)
    for rank, value in enumerate(values, start=1):
        if value > rank:
            print("Second")
            break
    else:
        # Loop finished without a break: decide by parity of the deficit.
        print("Second" if deficit <= 0 or deficit % 2 == 0 else "First")
|
19,355 | e11edbf56ebc03ae782e339505f1fde4d373f3f0 | import wmi
import time
import os
import csv
from datetime import datetime
# Timestamp used as the per-run CSV log file name.
tglf=str(datetime.now().strftime('%Y-%m-%d@%H-%M-%S'))
# WMI connections: default namespace for CIM data, root/wmi for live battery status.
c = wmi.WMI()
t = wmi.WMI(moniker = "//./root/wmi")
batts1 = c.CIM_Battery(Caption = 'Portable Battery')
# HTML header/footer for the append-only log page (auto-refreshes every 2 s).
header0="""
<!DOCTYPE html>
<html>
<head>
<title>Log Parameter Baterai</title>
<meta http-equiv="refresh" content="2" >
</head>
<body style="background-color:#000000;">
<div style="font-size:15px;color:#F5FFFA;">
"""
tutup0="""
</div>
</body>
</html>
"""
# HTML header/footer for the "current status" page (overwritten each sample).
header1="""
<!DOCTYPE html>
<html>
<head>
<title>Monitoring</title>
<meta http-equiv="refresh" content="2" >
<style>
h1 {text-align: center;}
p {text-align: center;}
div {text-align: center;}
</style>
</head>
<body style="background-color:#000000;">
<h1 style="font-size:50px;color:blue;text-alignment=center">Webpage Monitoring Baterai</h1>
<div style="font-size:30px;color:#F8F8FF;">
"""
tutup1="""
</div>
</body>
</html>
"""
def logbat():
    """Sample the battery via WMI and append one reading to the HTML, text
    and CSV logs, and rewrite the current-status page."""
    for i, b in enumerate(batts1):
        print('Battery %s Design Capacity: %s mWh' % (i, b.DesignCapacity or 0))
    batts = t.ExecQuery('Select * from BatteryFullChargedCapacity')
    for i, b in enumerate(batts):
        print ('Battery %d Fully Charged Capacity: %d mWh' % (i, b.FullChargedCapacity))
    batts = t.ExecQuery('Select * from BatteryStatus where Voltage > 0')
    for i, b in enumerate(batts):
        print("Sedang Logging")
        onln = str(b.PowerOnline)
        volt = str(b.Voltage/1000)           # mV -> V
        kap = str(b.RemainingCapacity/1000)  # mWh -> Wh
        a = str(datetime.now().strftime('%Y-%m-%d %H:%M:%S'))
        jam = str(datetime.now().strftime('%H:%M:%S'))
        # BUG FIX: the original called "f.close" without parentheses, so the
        # first two files were never explicitly closed/flushed. Use context
        # managers so every handle is closed deterministically.
        with open("battery_log.html", "a") as f:
            f.write(header0+"Waktu : "+jam+"<br>Charging : "+onln+"<br>Voltage : "+volt+" Volt<br>Sisa power (Wh) : "+kap+"<br><br>"+tutup0)
        with open("battery_log.txt", "a") as f:
            f.write("Log : "+a+"\nCharging : "+onln+"\nTegangan Baterai : "+volt+" Volt \nSisa kapasitas : "+kap+" Wh\n")
        with open("batt_curr_stat.html", "w") as f:
            f.write(header1+"<br>Waktu : "+jam+"<br>Charging : "+onln+"<br>Voltage : "+volt+" Volt<br>Sisa Kapasitas : "+kap+" Wh<br>"+tutup1)
        with open(tglf+'.csv', 'a', newline='') as file:
            writer = csv.writer(file)
            writer.writerow([jam, onln, volt, kap])
def log():
    """Sample the battery every 5 seconds; stop cleanly on Ctrl-C."""
    sampling = True
    while sampling:
        try:
            logbat()
            time.sleep(5)
        except KeyboardInterrupt:
            print('Dihentikan')
            sampling = False
def main1():
    """Create a fresh CSV log with a header row, clear stale logs, then log."""
    with open(tglf+'.csv', 'w', newline='') as file:
        writer = csv.writer(file)
        writer.writerow(["WAKTU", "STAT-CHG", "VOLTAGE(v)", "CAPACITY-REMAIN(Wh)"])
    # BUG FIX: the original removed battery_log.html whenever battery_log.txt
    # existed, crashing with FileNotFoundError if only the text log was
    # present. Check each stale file independently.
    for stale in ("battery_log.html", "battery_log.txt"):
        if os.path.exists(stale):
            os.remove(stale)
    log()
if __name__ == '__main__':
    # Entry point: initialise the CSV log and start the sampling loop.
    main1()
19,356 | f53e821a7e03cb0394d318503ca40db6e86e8b6f | class estado:
def __init__(self,id_estado,nombre,descripcion):
self.id_estado=id_estado
self.nombre=nombre
self.descripcion=descripcion
# Build a sample state and print each of its fields on its own line.
estado1 = estado(123, 'pagado', 'ya pagado')
for valor in (estado1.id_estado, estado1.nombre, estado1.descripcion):
    print(valor)
19,357 | c73d3fc170708ba29403fbcfe647445f0cb2945a | #!/usr/bin/python
from __future__ import print_function
import mysql.connector as mariadb
from mysql.connector import errorcode
import tkFileDialog, csv, sys
def main():
    """Import a student's transcript CSV into a MariaDB database.

    Flow: prompt for the student's name/id, let the user pick a CSV via a
    Tk file dialog, ensure the database and tables exist, then insert the
    student row and every (subject, grade) row per term.
    NOTE(review): this is Python 2 code (raw_input, dict.iteritems,
    tkFileDialog) — do not run under Python 3 without porting.
    """
    # mock
    #studentFName = 'Fname'
    #studentLName = 'Lname'
    #studentId = '6001011110001'
    #record = readCSV('database01.csv')
    # define DB detail
    dbName = "StudentRecords"
    tables = {}
    tables['Students'] = (
        "CREATE TABLE `Students` ("
        " `Student_id` varchar(13) NOT NULL, "
        " `First_name` varchar(100) NOT NULL,"
        " `Last_name` varchar(100) NOT NULL,"
        " PRIMARY KEY (`Student_id`))")
        #" UNIQUE (Student_id))")
    tables['Student_Records'] = (
        "CREATE TABLE `Student_Records` ("
        " `PK` int(11) NOT NULL AUTO_INCREMENT,"
        " `Subject` varchar(100) NOT NULL,"
        " `Weight` int(1) NOT NULL,"
        " `Section` int(3) NOT NULL,"
        " `Grade` varchar(2) NOT NULL,"
        " `Term` int(2) NOT NULL,"
        " `Student_id` varchar(13) NOT NULL,"
        " PRIMARY KEY (`pk`),"
        " FOREIGN KEY (Student_id) REFERENCES Students(Student_id))")
    # get student detail
    studentFName = raw_input("First Name: ")
    studentLName = raw_input("Last Name: ")
    studentId = raw_input("Student ID: ")
    # get CSV file
    askopenfile = {
        'title':"Select CSV file",
        'filetypes':(("CSV files","*.csv"),("all files","*.*")),
    }
    csvPath = tkFileDialog.askopenfilename(**askopenfile)
    if not (csvPath.endswith('.csv')): sys.exit("Error: Can't read file")
    # get record from CSV file
    record = readCSV(csvPath)
    # connect to DB
    # NOTE(review): connects as root with an empty password — confirm this
    # is only meant for a local development database.
    mariadb_connection = mariadb.connect(user='root', password='')
    cursor = mariadb_connection.cursor()
    # try to use or create DATABASE
    # NOTE(review): bare except silently hides any failure other than a
    # missing database (e.g. lost connection) — consider narrowing it.
    try:
        cursor.execute("USE {}".format(dbName))
    except:
        cursor.execute("CREATE DATABASE {}".format(dbName))
        cursor.execute("USE {}".format(dbName))
    # try to create tables (existing tables are reported, not dropped)
    for name, ddl in tables.iteritems():
        try:
            print ("Creating table {}: ".format(name), end='')
            cursor.execute(ddl)
        except mariadb.Error as err:
            if err.errno == errorcode.ER_TABLE_EXISTS_ERROR:
                print ("already exists.")
            else:
                print (err.msg)
        else:
            print ("OK")
    # try to insert student data (parameterized query — safe from injection)
    try:
        cursor.execute(
            "INSERT INTO Students (Student_id,First_name,Last_name)"
            "VALUES (%s,%s,%s);", (studentId,studentFName,studentLName))
    except mariadb.Error as error:
        print("Error: {}".format(error))
    # try to insert transcript: one row per subject, term counted from 1
    term_count = 1
    for term in record:
        for subject in term:
            try:
                cursor.execute(
                    "INSERT INTO Student_Records "
                    "(Subject,Weight,Section,Grade,Term,Student_id) "
                    "VALUES (%s,%s,%s,%s,%s,%s);",
                    (subject[0], subject[1], subject[2], subject[3],
                    term_count, studentId))
            except mariadb.Error as error:
                print("Error: {}".format(error))
        term_count += 1
    mariadb_connection.commit()
    mariadb_connection.close()
def readCSV(csvfile):
    """Parse a transcript CSV into a list of terms.

    The header line is skipped. Every row with a non-empty first cell adds
    [Subject, Weight, Section, Grade] to the current term; a row whose first
    cell is empty closes the term. Two consecutive empty rows end parsing.
    Note: a trailing term is recorded only if the file ends with an empty row.
    """
    with open(csvfile) as handle:
        next(handle)  # discard the header line
        record = []
        term = []
        for row in csv.reader(handle):
            if row[0]:
                term.append([row[0], row[1], row[2], row[3]])
                continue
            if not term:  # second empty row in a row: stop reading
                break
            record.append(term)
            term = []
    return record
main() |
19,358 | 730ec0be257ee06bb19d035fce487597d084ae85 |
import HouseDefinition3 as HD
from HousePi3 import CurrentTime
class Event():
    """One scheduled house event (timer- or sensor-driven).

    Attributes mirror the constructor arguments. *Style* defaults to a new
    empty list per instance.
    """
    def __init__(self,
                 Type = 'Timer',
                 Source = '',
                 Persistence = True,
                 Who = '',
                 TurnOn = 0,
                 TurnOff = 0,
                 Style = None,
                 Ack = 'NAK'):
        self.Type = Type
        self.Source = Source
        self.Persistence = Persistence
        self.Who = Who
        self.TurnOn = TurnOn
        self.TurnOff = TurnOff
        # BUG FIX: the original default Style=[] was a mutable default, so
        # every Event created without an explicit Style shared one list.
        self.Style = [] if Style is None else Style
        self.Ack = Ack
def EventList(Timers = HD.Timers):
    """Build an Event for each timer definition (a list of kwargs dicts).

    BUG FIX: the module is imported as HD, so the original default
    'HouseDefinition.Timers' raised NameError when the module loaded.
    """
    return [Event(**aTimer) for aTimer in Timers]
def CheckEvents(Events):
    # TODO(review): unfinished stub — it rebuilds a fresh EventList, ignores
    # the Events argument, and returns nothing. Complete or remove.
    #
    x = EventList()
|
19,359 | d3bc002fd47c0e0233f97a7353ea2391de44e091 | from django.shortcuts import render
from .models import Person, Sign
def home(request):
    """Render the landing page with a short welcome message."""
    return render(request, 'home.html', context={'homeText': "Welcome to the home page."})
def about(request):
    """Render the about page: bio text plus every Person and Sign record."""
    bio = """ My areas of specialization are community art, activism
    fashion and music subculture.
    """
    context = {
        'aboutText': bio,
        'person': Person.objects.all(),
        'sign': Sign.objects.all(),
    }
    return render(request=request, template_name='about.html', context=context)
def events(request):
    """Render the events page with a short description."""
    context = {'events_information': "These are upcoming events that I have organized."}
    return render(request, 'events.html', context=context)
19,360 | 8d15d5f88a6bc185ceb61dff4cd8f8a5709a5017 | import music21
SCALE_LENGTH = 7       # notes in the diatonic modes
LONG_SCALE_LENGTH = 8  # notes in the diminished (half-whole / whole-half) scales
START_POS = 8          # skip the leading "music21." of an interval's str()
# 0-based indices for scale degrees / interval slots.
ROOT = 0
SECOND = 1
THIRD = 2
FOURTH = 3
FIFTH = 4
SIXTH = 5
SEVENTH = 6

def interval_to_int(input_interval):
    """Return the digits found after position START_POS of str(input_interval), as an int.

    BUG FIX: the original iterated over the misspelled name 'inputInterval',
    raising NameError on every call.
    """
    result = ""
    for ch in str(input_interval)[START_POS:]:
        if ch.isdigit():
            result = result + ch
    return int(result)
def scale_matcher(chord):
    """Return a list of candidate scale notes fitting *chord*, or None.

    Dispatches on music21's chord.commonName; seventh chords return the two
    candidate modes concatenated.
    """
    key_root = chord.root()
    chord_kind = chord.commonName
    if (chord_kind == 'major triad'):
        return ionian(key_root)
    elif (chord_kind == 'minor triad'):
        return aeolian(key_root)
    elif (chord_kind == 'major seventh chord'):
        return lydian(key_root) + ionian(key_root)
    elif (chord_kind == 'minor seventh chord'):
        return dorian(key_root) + aeolian(key_root)
    elif (chord_kind == 'half-diminished seventh chord'):
        return locrian(key_root)
    elif (chord_kind == 'flat-ninth pentachord'):
        return half_whole(key_root)
    elif (chord_kind == 'dominant seventh chord'):
        return myxolydian(key_root)
    elif (chord_kind == 'quartal trichord'):
        return myxolydian(key_root)
    elif (chord_kind == 'augmented major tetrachord'):
        # BUG FIX: the original called melodic_min(), which does not exist;
        # the function defined in this module is melod_min().
        return melod_min(key_root.transpose('M6'))
    elif (chord_kind == 'Neapolitan pentachord'):
        return half_whole(key_root)
    elif (chord_kind == 'tritone quartal tetrachord'):
        return half_whole(key_root)
    return None  # unmatched chord kinds have no scale rule
def ionian(key):
    """Major (Ionian) scale on *key*.

    BUG FIX: the original placed the half step after the 2nd degree
    (count == SECOND), producing W-H-W-W-W-W; the major scale is
    W-W-H-W-W-W, i.e. the half step follows the 3rd degree (count == THIRD),
    matching the sibling aeolian/lydian/myxolydian implementations.
    """
    current_note = music21.note.Note(key)
    result = []
    count = 0
    while count < SCALE_LENGTH:
        result.append(current_note)
        if count != THIRD:
            current_note = current_note.transpose('M2')
        else:
            current_note = current_note.transpose('m2')
        count += 1
    return result
def aeolian(key):
    """Natural minor (Aeolian) scale on *key*; half steps after degrees 2 and 5."""
    note = music21.note.Note(key)
    scale = []
    for degree in range(SCALE_LENGTH):
        scale.append(note)
        note = note.transpose('m2' if degree in (SECOND, FIFTH) else 'M2')
    return scale
def dorian(key):
    """Dorian mode on *key*; half steps after degrees 2 and 6."""
    note = music21.note.Note(key)
    scale = []
    for degree in range(SCALE_LENGTH):
        scale.append(note)
        note = note.transpose('m2' if degree in (SECOND, SIXTH) else 'M2')
    return scale
def phrygian(key):
    """Phrygian mode on *key*; half steps after degrees 1 and 5.

    BUG FIX: the original transposed a misspelled 'currentNote' variable,
    raising NameError on the first iteration.
    """
    current_note = music21.note.Note(key)
    result = []
    count = 0
    while count < SCALE_LENGTH:
        result.append(current_note)
        if count == ROOT or count == FIFTH:
            current_note = current_note.transpose('m2')
        else:
            current_note = current_note.transpose('M2')
        count += 1
    return result
def lydian(key):
    """Lydian mode on *key*; the single generated half step follows degree 4."""
    note = music21.note.Note(key)
    scale = []
    for degree in range(SCALE_LENGTH):
        scale.append(note)
        note = note.transpose('m2' if degree == FOURTH else 'M2')
    return scale
def myxolydian(key):
    """Mixolydian mode on *key*; half steps after degrees 3 and 6."""
    note = music21.note.Note(key)
    scale = []
    for degree in range(SCALE_LENGTH):
        scale.append(note)
        note = note.transpose('m2' if degree in (THIRD, SIXTH) else 'M2')
    return scale
def locrian(key):
    """Locrian mode on *key*; half steps after degrees 1 and 4."""
    note = music21.note.Note(key)
    scale = []
    for degree in range(SCALE_LENGTH):
        scale.append(note)
        note = note.transpose('m2' if degree in (ROOT, FOURTH) else 'M2')
    return scale
def half_whole(key):
    """Half-whole diminished scale on *key* (8 notes, alternating m2/M2).

    BUG FIX: the parameter was named keyInput while the body used 'key',
    raising NameError. Callers in this module pass positionally, so the
    rename is safe and matches every sibling scale function.
    """
    current_note = music21.note.Note(key)
    result = []
    count = 0
    while count < LONG_SCALE_LENGTH:
        result.append(current_note)
        if count % 2 == 0:
            current_note = current_note.transpose('m2')
        else:
            current_note = current_note.transpose('M2')
        count += 1
    return result
def whole_half(key):
    """Whole-half diminished scale on *key* (8 notes, alternating M2/m2)."""
    note = music21.note.Note(key)
    scale = []
    for step in range(LONG_SCALE_LENGTH):
        scale.append(note)
        note = note.transpose('M2' if step % 2 == 0 else 'm2')
    return scale
def harm_min(key):
    """Harmonic minor scale on *key*: W-H-W-W-H-A2.

    BUG FIXES: the original referenced a misspelled 'currentNote'
    (NameError on first use), and placed the second half step after
    degree 4 (FOURTH) instead of degree 5 (FIFTH) — harmonic minor's half
    steps follow degrees 2 and 5, with the augmented second after degree 6.
    """
    current_note = music21.note.Note(key)
    result = []
    count = 0
    while count < SCALE_LENGTH:
        result.append(current_note)
        if count == SECOND or count == FIFTH:
            current_note = current_note.transpose('m2')
        elif count == SIXTH:
            current_note = current_note.transpose('a2')
        else:
            current_note = current_note.transpose('M2')
        count += 1
    return result
def melod_min(key):
    """Melodic minor (ascending) scale on *key*; the half step follows degree 2."""
    note = music21.note.Note(key)
    scale = []
    for degree in range(SCALE_LENGTH):
        scale.append(note)
        note = note.transpose('m2' if degree == SECOND else 'M2')
    return scale
|
import requests as req

# Fetch and print the current AXS -> EUR exchange rate from CoinAPI.
# NOTE(review): the API key is hard-coded; prefer loading it from an
# environment variable so it is not committed to source control.
RATE_URL = "https://rest.coinapi.io/v1/exchangerate/AXS/EUR"
API_KEY = "DF48B51C-50D6-4F3E-9479-0C72F83F4695"
headers = {"X-CoinAPI-Key": API_KEY}

response = req.get(RATE_URL, headers=headers)
print(response.status_code)
payload = response.json()
print(payload['rate'])
print(response.json()['rate'])
19,362 | 2ba76a44615ec475ed86fe2055ed6f1211d6de4d | # coding: utf-8
"""
DBUtility class
Abstract::
- for file controlling
History::
- Ver. Date Author History
- [1.0.0] 2020/02/19 Pham New
Copyright (C) 2020 HACHIX Corporation. All Rights Reserved.
"""
import pymongo
import json
from collections import OrderedDict
from configparser import ConfigParser
import os
import time
from os import path
import sys
from pdb import set_trace
from threading import Lock
# Common error code -> message mapping (0/None means success).
ERRMSG = {
    0: None,
    -1: 'Internal Error'
}
class SingletonMetaDB(type):
    """Thread-safe singleton metaclass for the DB controller.

    The first instantiation of a class using this metaclass wins; every
    later call returns the cached instance. A lock guards creation so two
    threads cannot build separate instances.
    """
    _instance = None
    _lock = Lock()

    def __call__(cls, *args, **kwargs):
        with cls._lock:
            if cls._instance is None:
                cls._instance = super().__call__(*args, **kwargs)
            return cls._instance
class DbController(metaclass=SingletonMetaDB):
    """Singleton facade over the MongoDB collections used by the talent5 app.

    Connection parameters come from db_config.ini next to this file; the
    MongoClient is created once (with retries) and shared class-wide.
    """
    client = None
    retry_times = 5
    # NOTE(review): the comment said ms, but this value is passed to
    # time.sleep(), which takes seconds — confirm the intended interval.
    retry_interval = 5 # Unit: ms
    def __init__(self):
        """
        constructor
        """
        # Read the configuration file
        current_dir = path.dirname(path.abspath(__file__))
        config_file = os.path.join(current_dir, 'db_config.ini')
        abs_path_config = os.path.abspath(config_file)
        config_data = ConfigParser()
        config_data.optionxform = str # differ with large and small character
        config_data.read(abs_path_config, encoding="utf-8")
        # connect to DB once (retry up to retry_times on failure)
        if DbController.client is None:
            count = 0
            while True:
                if count > DbController.retry_times:
                    break
                try:
                    client = pymongo.MongoClient(config_data['DB_INFO']['uri'])
                    print(client)
                    if client is not None:
                        DbController.client = client
                        break
                except Exception as ex:
                    print("exception = {}".format(ex))
                    count += 1
                    time.sleep(DbController.retry_interval)
        print(config_data['DB_INFO']['database'])
        db = DbController.client[config_data['DB_INFO']['database']]
        # Bind the target collections (names come from the config file).
        self.talent5__staff_collection = db[config_data['DB_INFO']['talent5__staff_collection']]
        self.talent5__staff_collection.create_index("id", unique=True)
        self.talent5__encode_collection = db[config_data['DB_INFO']['talent5__encode_collection']]
        self.talent5__feedback_collection = db[config_data['DB_INFO']['talent5__feedback_collection']]
        self.talent5__count_collection = db[config_data['DB_INFO']['talent5__count_collection']]
    @staticmethod
    def set_schema_collection(db, schema_file, collection_name):
        """
        Set the collection schema (JSON validator file applied via collMod).
        """
        current_dir = path.dirname(path.abspath(__file__))
        json_path = os.path.join(current_dir, schema_file)
        abs_json_path = os.path.abspath(json_path)
        with open(abs_json_path, 'r') as j:
            validator = json.loads(j.read())
        query = [('collMod', collection_name),
                 ('validator', validator),
                 ('validationLevel', 'moderate')]
        query = OrderedDict(query)
        db.command(query)
    def get_collection(self, collection_name):
        """
        Select the target collection by its config name; None if unknown.
        """
        collection = None
        if collection_name == 'talent5__staff_collection':
            collection = self.talent5__staff_collection
        elif collection_name == 'talent5__encode_collection':
            collection = self.talent5__encode_collection
        elif collection_name == 'talent5__count_collection':
            collection = self.talent5__count_collection
        elif collection_name == 'talent5__feedback_collection':
            collection = self.talent5__feedback_collection
        return collection
    def insert_staff(self, data):
        """
        Insert staff data; returns 0 on success, -1 on any failure.
        - data: insert data
        - collection: (string)
        """
        print("Inserting staff data")
        # NOTE(review): Collection.insert() is deprecated and removed in
        # PyMongo 4 — insert_one/insert_many needed there; confirm driver version.
        try:
            collection_name = 'talent5__staff_collection'
            collection = self.get_collection(collection_name)
            if collection is not None:
                collection.insert(data)
                print("The insert staff is done!")
                return 0
            else:
                print('collection is None')
                return -1
        except:
            print("Exception:", sys.exc_info())
            return -1
    def insert_encode(self, data):
        """
        Insert encode data; returns (0, msg) on success, (-1, msg) on failure.
        - data: insert data
        - collection: (string)
        """
        print("Inserting encode data")
        try:
            collection_name = 'talent5__encode_collection'
            collection = self.get_collection(collection_name)
            if collection is not None:
                collection.insert(data)
                print("The insert encode is done!")
                return 0, "The insert encode is done!"
            else:
                print('collection is None')
                return -1, 'collection is None'
        except Exception as e:
            msg = "Exception: {}".format(e)
            print("Exception:", sys.exc_info())
            return -1, msg
    def insert_feedback(self, data):
        """
        Insert feedback data; returns (0, msg) on success, (-1, msg) on failure.
        - data: insert data
        """
        print("insert_feedback started")
        try:
            collection_name = 'talent5__feedback_collection'
            collection = self.get_collection(collection_name)
            if collection is not None:
                collection.insert(data)
                print("The insert feedback is done!")
                return 0, "The insert feedback is done!"
            else:
                print('collection is None')
                return -1, 'collection is None'
        except Exception as e:
            msg = "Exception: {}".format(e)
            print("Exception:", sys.exc_info())
            return -1, msg
    def get_coll_length(self, collection_name):
        # Returns (0, count) or (-1, error message).
        # NOTE(review): pymongo's count_documents() requires a filter
        # argument (e.g. {}) — this call likely raises TypeError; confirm.
        try:
            collection = self.get_collection(collection_name)
            if collection is not None:
                return 0, collection.count_documents()
            else:
                return -1, "collection {} is None".format(collection_name)
        except Exception as e:
            msg = "Error when get coll length: {}".format(e)
            return -1, msg
    def find_one(self, query, collection_name='talent5__staff_collection'):
        """
        API for find single object; returns the matching document or None.
        Args::
         - query(Obj) : {"key": "value"}
        """
        collection = self.get_collection(collection_name)
        if collection is None:
            return None
        else:
            return collection.find_one(query)
    def find(self, query, collection_name='talent5__staff_collection'):
        """
        API for find many objects; returns a cursor with _id excluded, or None.
        Args::
         - query(Obj) : MongoDB query. Eg: {"key": "value"}
        """
        collection = self.get_collection(collection_name)
        if collection is None:
            return None
        else:
            return collection.find(query, {'_id': False})
    def get_prev_count_encode(self):
        """Return the most recently stored encode count (0 if none stored)."""
        collection_name = 'talent5__count_collection'
        collection = self.get_collection(collection_name)
        item = list(collection.find({}, {'_id': False}).sort([('_id', -1)]).limit(1))
        if len(item) == 0:
            count = 0
        else:
            count = item[0]['count']
        return count
    def get_curr_count_encode(self):
        """Return the current number of documents in the encode collection."""
        collection_name = 'talent5__encode_collection'
        collection = self.get_collection(collection_name)
        count = collection.count_documents({})
        return count
    def update_prev_count_encode(self, count):
        # Persist the current encode count as the new "previous" value.
        collection_name = 'talent5__count_collection'
        collection = self.get_collection(collection_name)
        collection.insert({'count': int(count)})
        return None
    def delete_null_id_record(self):
        # Remove every encode document whose id field is the empty string.
        collection_name = 'talent5__encode_collection'
        collection = self.get_collection(collection_name)
        res = collection.delete_many({"id": ""})
        return None
    def add_ts_to_feedback_coll(self):
        """
        Backfill a 'ts' field on every feedback doc: the part of file_name
        before ".j" (i.e. the timestamp prefix of "<ts>.jpg"-style names).
        """
        collection_name = 'talent5__feedback_collection'
        collection = self.get_collection(collection_name)
        print("collection {}".format(collection))
        cursor = collection.aggregate([
            {"$addFields":{
                "ts": {"$arrayElemAt":[
                    {"$split": ["$file_name", ".j"]}, 0]}
            }
            }
        ])
        # print(list(cursor))
        for doc in list(cursor):
            print(doc['_id'])
            collection.find_one_and_update({"_id": doc['_id']},
                                           {"$set": {
                                               "ts": doc['ts']
                                           }})
        return list(cursor)
if __name__ == '__main__':
    # Ad-hoc maintenance entry point: instantiate the singleton controller
    # and backfill the 'ts' field on every feedback document.
    db_ctrl = DbController()
    # db_ctrl.delete_null_id_record()
    db_ctrl.add_ts_to_feedback_coll()
19,363 | f0ba1779b905f22bb6992d5b1faf5157a97b3bea | useFixture(default)
def test():
    """Marathon GUI-driver script: create a 'Group of Records' layout named
    xxzzxx, attach two ams PO Download child records, save it as xxzzxx11,
    and verify the child-record table contents after reloading.

    NOTE(review): window/click/select/assert_p/close are injected by the
    Marathon test runner (see useFixture above) — not plain Python names.
    """
    from Modules import commonBits
    java_recorded_version = '1.6.0_22'
    if window('Record Layout Definitions'):
        click('*1')
        select('RecordDef.Record Name_Txt', 'xxzzxx')
        select('RecordDef.Record Type_Txt', commonBits.fl('Group of Records')
        )
        select('TabbedPane', commonBits.fl('Child Records')
        )
        click(commonBits.fl('Insert'))
        click(commonBits.fl('Insert'))
        select('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',0()')
        select('ChildRecordsJTbl', 'ams PO Download: Allocation', commonBits.fl('Child Record') + ',0')
        select('ChildRecordsJTbl', 'ams PO Download: Detail', commonBits.fl('Child Record') + ',1')
        select('ChildRecordsJTbl', 'ams PO Download: Detail', commonBits.fl('Tree Parent') + ',0')
        select('ChildRecordsJTbl', 'cell:' + commonBits.fl('Tree Parent') + ',0(ams PO Download: Detail)')
        assert_p('ChildRecordsJTbl', 'Content', '[[, ams PO Download: Allocation, , , , , ams PO Download: Detail], [, ams PO Download: Detail, , , , , ]]')
        select('ChildRecordsJTbl', 'cell:' + commonBits.fl('Tree Parent') + ',0(ams PO Download: Detail)')
        # Save the layout under a new name via the modal input dialog.
        click(commonBits.fl('Save As'))
        if window('Input'):
            select('OptionPane.textField', 'xxzzxx11')
            click('OK')
        close()
        #select('TabbedPane', 'Extras')
        #select('TabbedPane', 'Extras')
        #select('TabbedPane', 'Child Records')
        assert_p('ChildRecordsJTbl', 'Content', '[[, ams PO Download: Allocation, , , , , ams PO Download: Detail], [, ams PO Download: Detail, , , , , ]]')
        select('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',0(ams PO Download: Allocation)')
        select('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',0(ams PO Download: Allocation)')
        assert_p('ChildRecordsJTbl', 'Text', 'cell:' + commonBits.fl('Child Record') + ',0(ams PO Download: Allocation)')
        select('ChildRecordsJTbl', 'cell:' + commonBits.fl('Child Record') + ',0(ams PO Download: Allocation)')
        click('BasicInternalFrameTitlePane$NoFocusButton2')
        # Reload both saved layouts and re-check the table contents.
        click('*')
        select('RecordList.Record Name_Txt', 'xxzzxx')
        select('RecordList.Description_Txt', '%')
##        assert_p('ChildRecordsJTbl', 'Content', '[[, ams PO Download: Allocation, , , , , ams PO Download: Detail], [, ams PO Download: Detail, , , , , ]]')
        assert_p('ChildRecordsJTbl', 'Content', '[[, ams PO Download: Allocation, , , , , ams PO Download: Detail], [, ams PO Download: Detail, , , , , ]]')
        select('RecordList.Record Name_Txt', 'xxzzxx11')
        select('RecordList.Description_Txt', '%%')
        assert_p('ChildRecordsJTbl', 'Content', '[[, ams PO Download: Allocation, , , , , ams PO Download: Detail], [, ams PO Download: Detail, , , , , ]]')
        click('BasicInternalFrameTitlePane$NoFocusButton2')
    close()
|
def reject(pvalue, alpha_levels=(.05, .01, .005)):
    """Print, for each alpha level, whether the null hypothesis is rejected.

    :param pvalue: observed p-value, must lie in [0, 1]
    :param alpha_levels: significance thresholds to compare against
    :raises ValueError: if pvalue is outside [0, 1]
    """
    # BUG FIX: validation used assert, which is stripped under `python -O`;
    # raise a real exception instead. The default is also a tuple now (the
    # original mutable-list default is a known anti-pattern).
    if not 0.0 <= pvalue <= 1.0:
        raise ValueError("pvalue must be in [0, 1], got {}".format(pvalue))
    # BUG FIX: the original message called the p-value "the probability of
    # null hypothesis", which is statistically wrong — a p-value is the
    # probability of data at least this extreme *given* the null.
    print('The p-value is {}% so we'.format(100 * pvalue))
    for critical in alpha_levels:
        if critical > pvalue:
            print('reject null hypothesis at {}% confidence level.'.format(100 * (1 - critical)))
        else:
            print('do not reject null hypothesis at {}% confidence level.'.format(100 * (1 - critical)))
|
19,365 | e216a6a222194233402dc7fb6f306e15915a6e52 | from rest_framework import routers
from .api import InstructorViewSet
# Route the Instructor API through a DRF DefaultRouter; the router
# generates the standard list/detail URL patterns for the viewset.
_router = routers.DefaultRouter()
_router.register('api/Instructors', InstructorViewSet, 'Instructors')
urlpatterns = _router.urls
19,366 | 641b4e4617d37f4b3903d44175cb7d642acd1b0d | import sys
import os
from PyPDF2 import PdfFileReader
def get_info(path):
    """Print "<path>&&<title>" for the PDF at *path*.

    BUG FIX: the original used the Python 2 built-in unicode() and
    .encode('utf8'), so under Python 3 it raised NameError (or TypeError
    when concatenating bytes to str). The title is now kept as a plain str,
    and a missing/None title falls back to a placeholder.
    """
    with open(path, 'rb') as f:
        pdf = PdfFileReader(f)
        info = pdf.getDocumentInfo()
    if info is not None and info.title is not None:
        title = str(info.title)
    else:
        title = "Title not recovered"
    print(path + "&&" + title)
if __name__ == '__main__':
    folder = sys.argv[1]
    print(folder)
    # BUG FIX: the original called get_info(folder + file), which misses the
    # path separator and ignores the subdirectory each file was found in
    # (os.walk descends recursively); join against the walk root instead.
    # Also match the extension exactly rather than '.pdf' anywhere in the name.
    for root, _dirs, files in os.walk(folder):
        for name in files:
            if name.endswith('.pdf'):
                get_info(os.path.join(root, name))
|
19,367 | db8ffbc6e5914fd7fef35281942980d59f5fee61 | import numpy
import scipy.stats
import matplotlib.pyplot as plt
def weighted_sample(ws, vs, n):
    """Yield n values from vs chosen according to the weights ws.

    Walks the weight list left to right, consuming exactly one uniform draw
    from numpy.random per yielded value.
    """
    remaining = float(sum(ws))
    idx = 0
    weight_left = ws[0]
    value = vs[0]
    while n:
        draw = remaining * (1 - numpy.random.random() ** (1.0 / n))
        remaining -= draw
        while draw > weight_left:
            draw -= weight_left
            idx += 1
            weight_left = ws[idx]
            value = vs[idx]
        weight_left -= draw
        yield value
        n -= 1
# main: unnormalised posterior over theta for a Cauchy(theta, 1) location
# model given observations y, then draw a weighted sample and plot it.
y = [-2, -1, 0, 1.5, 2.5]
step = 0.01
theta = numpy.arange(0, 1, 0.005)
densunnorm = []
for th in theta:
    prod = 1
    for yy in y:
        prod = prod * scipy.stats.cauchy.pdf(yy, th, 1)
    densunnorm.append(prod)
print(step * sum(densunnorm))
# BUG FIX: densunnorm was a plain Python list, so list/float raised
# TypeError on the normalisation line; convert to a numpy array first.
# (The original also used Python 2 print statements, a syntax error on
# Python 3 — all prints are function calls now.)
densunnorm = numpy.asarray(densunnorm)
# NOTE(review): theta spacing is 0.005 but step is 0.01; the trailing *2
# appears to compensate for that mismatch — confirm the intent.
densnorm = densunnorm / (step * sum(densunnorm)) * 2
print(densunnorm)
print(densnorm)
print(step * sum(densunnorm))
res = [x for x in weighted_sample(step * densnorm, theta, 1000)]
plt.hist(res, 50)
plt.show()
|
19,368 | 7018481b7b04da2f87219872acf4f1fc483c92ca | #This program will start from a given video and find the linking of other users to it
import requests
from bs4 import BeautifulSoup
userdict={}
def youtube_crawler(url):
    """Follow related-video links starting from *url* forever, tallying the
    user names seen on each page into the module-level userdict.

    Prints the running tally after every page. Never returns; interrupt to stop.
    """
    while True:
        source_code = requests.get(url)
        soup = BeautifulSoup(source_code.text, 'lxml')
        # Tally every hovercard user (commenters) on the page.
        for link in soup.findAll('a', {'class': 'g-hovercard'}):
            name = link.string
            userdict[name] = userdict.get(name, 0) + 1
        # Tally the current video's uploader link.
        # BUG FIX: the original read this name into y but then updated
        # userdict[x], double-counting the last commenter instead of the
        # uploader.
        for link in soup.findAll('a', {'class': "g-hovercard yt-uix-sessionlink spf-link "}):
            name = link.string
            userdict[name] = userdict.get(name, 0) + 1
        print(userdict)
        # Follow the last related-video link found on the page.
        for link in soup.findAll('a', {'class': " content-link spf-link yt-uix-sessionlink spf-link "}):
            url = 'https://www.youtube.com' + link.get('href')
youtube_crawler('https://www.youtube.com/watch?v=oWA4TddxXG4')
|
19,369 | f2bb0e775902b3e1544b8f1fbbf462a89163bc71 | # Generated by Django 2.2.1 on 2019-05-30 09:56
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated by Django (see the header comment): creates the
    # 'groups' and 'groupusers' tables. Do not edit by hand — generate a
    # follow-up migration instead.
    dependencies = [
        ('cinect_api', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Groups',
            fields=[
                ('groupid', models.IntegerField(default=-1, primary_key=True, serialize=False)),
                ('groupname', models.CharField(max_length=17)),
            ],
            options={
                'db_table': 'groups',
            },
        ),
        migrations.CreateModel(
            name='GroupUsers',
            fields=[
                ('groupuserid', models.IntegerField(default=-1, primary_key=True, serialize=False)),
                ('groupid', models.IntegerField(default=-1)),
                ('username', models.CharField(max_length=20)),
            ],
            options={
                'db_table': 'groupusers',
            },
        ),
    ]
|
19,370 | a82f1e5a1e546aa8dd45f4a4e6da093758cdff36 | from google.cloud import firestore
db = firestore.Client()
# Delete a collection content
# Whenever a collection is empty it is deleted
def delete_collection(coll_ref, batch_size):
    """Delete every document in *coll_ref*, *batch_size* documents at a time.

    Stops once a batch comes back smaller than batch_size (collection empty).
    BUG FIX: the original recursed once per batch, so a large collection —
    especially with batch_size=1, as used below — could exhaust Python's
    recursion limit; this version loops instead.
    """
    while True:
        deleted = 0
        for doc in coll_ref.limit(batch_size).stream():
            print(u'Deleting doc {} => {}'.format(doc.id, doc.to_dict()))
            doc.reference.delete()
            deleted = deleted + 1
        if deleted < batch_size:
            return
# Purge every entry under groups/bfmtv, one document per batch.
collection_ref = db.collection('groups').document('bfmtv').collection('entries')
delete_collection(collection_ref, 1)
|
19,371 | 406fbc9d17b633c69eff687845975065aec0c252 | # -*- coding: utf-8 -*-
"""
Created on Wed Nov 6 17:25:44 2019
@author: Jagannath
"""
# Linear search for the value 2**x in L, reporting its 1-based position.
L = [1, 2, 4, 8, 16, 32, 64]
x = 5
i = 0
while i < len(L) and L[i] != 2**x:
    i = i + 1
# BUG FIX: the original used while/else, whose else-branch always runs when
# the loop contains no break — so it printed "Found in position 8" even when
# the value was absent. It also hard-coded 2**5 instead of using x.
if i < len(L):
    print('Found in position', i + 1, 'in the list')
else:
    print('Not found in the list')
19,372 | 575d9f1aea7645f173e19753580cf48ec2b28f1b | # ----------------------------------------------------------------------------------------------------------------------
#
# Class handling Surreal Dataset (training and testing)
#
# ----------------------------------------------------------------------------------------------------------------------
#
# Hugues THOMAS - 11/06/2018
# Nicolas DONATI - 01/01/2020
# ----------------------------------------------------------------------------------------------------------------------
#
# Imports and global variables
# \**********************************/
#
# Basic libs
import tensorflow as tf
import numpy as np
# Dataset parent class
from datasets.common import Dataset
import cpp_wrappers.cpp_subsampling.grid_subsampling as cpp_subsampling
# ----------------------------------------------------------------------------------------------------------------------
#
# Utility functions
# \***********************/
#
def grid_subsampling(points, features=None, labels=None, sampleDl=0.1, verbose=0):
    """
    CPP wrapper for a grid subsampling (method = barycenter for points and features).

    :param points: (N, 3) matrix of input points
    :param features: optional (N, d) matrix of features (floating number)
    :param labels: optional (N,) matrix of integer labels
    :param sampleDl: parameter defining the size of grid voxels
    :param verbose: 1 to display
    :return: subsampled points, with features and/or labels depending of the input
    """
    # Build the keyword set dynamically instead of branching on every
    # None-combination; labels map to the C++ wrapper's 'classes' argument.
    kwargs = {'sampleDl': sampleDl, 'verbose': verbose}
    if features is not None:
        kwargs['features'] = features
    if labels is not None:
        kwargs['classes'] = labels
    return cpp_subsampling.compute(points, **kwargs)
# ----------------------------------------------------------------------------------------------------------------------
#
# Class Definition
# \***************/
#
class SurrealDataset(Dataset):
    """
    Class to handle any subset of 5000 shapes of the surreal dataset introduced in 3D coded (for comparison in exp2)
    this dataset is composed of 6890-points shapes, so the spectral data is relatively heavy.
    """
    # Initiation methods
    # ------------------------------------------------------------------------------------------------------------------
    def __init__(self, config):
        """Store file locations and the training/spectral parameters taken from *config*."""
        Dataset.__init__(self, 'surreal')
        ####################
        # Dataset parameters
        ####################
        # Type of task conducted on this dataset
        # self.network_model = 'shape_matching' # this is the only type of model here but it comes from KPConc code
        ##########################
        # Parameters for the files
        ##########################
        # Path of the folder containing files
        # NOTE(review): machine-specific relative path — confirm before running on another setup
        self.dataset_name = 'surreal'
        self.path = '../../../media/donati/Data1/Datasets/shapes_surreal/'
        self.data_folder = 'off_2/'
        self.spectral_folder = 'spectral_full/'
        self.txt_file = 'surreal5000_training.txt'
        ####################################################
        ####################################################
        ####################################################
        # decide the number of shapes to keep in the training set (exp 2 setting)
        self.split = config.split
        self.num_train = config.num_train # -1 for all
        # Number of eigenvalues kept for this model fmaps
        self.neig = config.neig
        self.neig_full = config.neig_full
        # Number of thread for input pipeline
        self.num_threads = config.input_threads

    # Utility methods
    # ------------------------------------------------------------------------------------------------------------------
    def get_batch_gen(self, config):
        """
        A function defining the batch generator for each split. Should return the generator, the generated types and
        generated shapes.
        :param config: configuration object (provides batch_num; num_train/neig come from __init__)
        :return: gen_func, gen_types, gen_shapes
        """
        ################
        # Def generators
        ################
        def random_balanced_gen():
            # Yields tuples of arrays concatenated over shapes, emitting one batch
            # every config.batch_num shapes (plus one final, possibly short, batch).
            print('trying to generate batch series with ', self.num_train, 'shapes')
            # Initiate concatenation lists
            tp_list = [] # points
            tev_list = [] # eigen vectors
            tevt_list = [] # transposed eigen vectors
            tv_list = [] # eigen values
            tevf_list = [] # full eigen vectors for ground truth maps
            ti_list = [] # cloud indices
            batch_n = 0
            i_batch = 0
            gen_indices = np.random.permutation(int(self.num_train)) # initiate indices for the generator
            # if we had to test on this dataset we would need to introduce a test/val case with non-shuffled indices
            # print(gen_indices.shape, config.batch_num)
            # if config.split == 'test':
            # print('test setting here not fully supported')
            # n_shapes = self.num_test # has to be defined
            # gen_indices = []
            # for i in range(n_shapes - 1):
            # for j in range(i + 1, n_shapes):
            # gen_indices += [i, j] # put all the pairs in order
            # gen_indices = np.array(gen_indices)
            # Generator loop
            for p_i in gen_indices:
                # Get points and other input data (spectral quantities truncated to neig)
                new_points = self.input_points[p_i]
                new_evecs = self.input_evecs[p_i][:, :self.neig]
                new_evecs_trans = self.input_evecs_trans[p_i][:self.neig, :]
                new_evals = self.input_evals[p_i][:self.neig]
                new_evecs_full = self.input_evecs_full[p_i][:, :self.neig]
                n = new_points.shape[0]
                # Batch complete -> emit it and reset the accumulators
                if i_batch == config.batch_num:
                    yield (np.concatenate(tp_list, axis=0),
                           np.concatenate(tev_list, axis=0),
                           np.concatenate(tevt_list, axis=1),
                           np.concatenate(tv_list, axis=1),
                           np.concatenate(tevf_list, axis=0),
                           np.array(ti_list, dtype=np.int32),
                           np.array([tp.shape[0] for tp in tp_list]))
                    tp_list = []
                    tev_list = []
                    tevt_list = []
                    tv_list = []
                    tevf_list = []
                    ti_list = []
                    batch_n = 0
                    i_batch = 0
                # Add data to current batch
                tp_list += [new_points]
                tev_list += [new_evecs]
                tevt_list += [new_evecs_trans]
                tv_list += [new_evals]
                tevf_list += [new_evecs_full]
                ti_list += [p_i]
                # Update batch size
                batch_n += n
                i_batch += 1
            # yield the rest if necessary (it will not be a full batch and could lead to mistakes because of
            # shape matching needing pairs !!!!)
            yield (np.concatenate(tp_list, axis=0),
                   np.concatenate(tev_list, axis=0),
                   np.concatenate(tevt_list, axis=1),
                   np.concatenate(tv_list, axis=1),
                   np.concatenate(tevf_list, axis=0),
                   np.array(ti_list, dtype=np.int32),
                   np.array([tp.shape[0] for tp in tp_list]))
        ##################
        # Return generator
        ##################
        # Generator types and shapes
        gen_types = (tf.float32, tf.float32, tf.float32, tf.float32, tf.float32, tf.int32, tf.int32)
        gen_shapes = ([None, 3], [None, self.neig],
                      [self.neig, None], [self.neig, None], [None, self.neig], [None], [None])
        return random_balanced_gen, gen_types, gen_shapes

    def get_tf_mapping(self, config):
        # Builds the tf.data map function that augments the raw batch and
        # assembles the full per-layer input list for the network.
        def tf_map(stacked_points, stacked_evecs, stacked_evecs_trans,
                   stacked_evals, stacked_evecs_full, obj_inds, stack_lengths):
            """
            From the input point cloud, this function compute all the point clouds at each conv layer, the neighbors
            indices, the pooling indices and other useful variables.
            :param stacked_points: Tensor with size [None, 3] where None is the total number of points
            :param stack_lengths: Tensor with size [None] where None = number of batch // number of points in a batch
            """
            # Get batch indice for each point
            batch_inds = self.tf_get_batch_inds(stack_lengths)
            # Augment input points
            stacked_points, scales, rots = self.tf_augment_input(stacked_points,
                                                                 batch_inds,
                                                                 config)
            # First add a column of 1 as feature for the network to be able to learn 3D shapes
            stacked_features = tf.ones((tf.shape(stacked_points)[0], 1), dtype=tf.float32)
            # Then use positions or not
            if config.in_features_dim == 1:
                pass
            elif config.in_features_dim == 3:
                stacked_features = tf.concat((stacked_features, stacked_points), axis=1)
            else:
                raise ValueError('Only accepted input dimensions are 1, 3 (with or without XYZ)')
            # Get the whole input list
            input_list = self.tf_shape_matching_inputs(config,
                                                       stacked_points,
                                                       stacked_features,
                                                       stack_lengths,
                                                       batch_inds)
            # Add scale and rotation for testing
            input_list += [scales, rots, obj_inds]
            input_list += [stack_lengths] # in order further on to multiply element-wise in the stack
            input_list += [stacked_evecs, stacked_evecs_trans, stacked_evals]
            input_list += [stacked_evecs_full]
            return input_list
        return tf_map
|
19,373 | 4580f94b2d857c833a83d582405c3bc2449776b8 | # --------CHANGE OVERS
######################################### 0 Intro #########################################
# Naming Convention: first letter of variable indicates the type
# a = array
# b = binary / boolean
# c = code, for .py files only
# d = dictionary
# f = float
# g = graph
# i = integer
# l = list
# lim = limit
# s = string
# file = file generated or modified through code
import pyodbc as db
import random
import pandas as pd
import numpy as np
import c02_geneticAlgorithmFunctions as gak
import c03_globalVariables as glob
import sys
import matplotlib.pyplot as plt
import math
import datetime
import os
# global trackers for the best fitness found so far and the run histories
fMinFitness = 10000000000
iGenerationCount = 0
# lMinFitness layout (per udf_calcFitness3): [fitness, member name, machines, breakers, report string]
lMinFitness = [10000000000, 'START', [],[],""]
lMinFitness_history = [10000000000]
lFitness_history=[]
lIllegal_history=[]
######################################### 1 DATA IMPORT #########################################
### 1.1 Get Material Family Data
# for now based on excel; check c13_ImportFromSQL.py for SQL import code
dFamilyCO = {}
dMaterialFamily = {}
dWcList = {}
dMachineConfig = {}
dMaterialCO ={}
glob.lFamilyAtlas_0 = []
glob.lMaterialAtlas_0 = []
#import from Excel (one workbook, one sheet per data set)
dfWCImport = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="order")
dfFamilies = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="families")
dfFamilyCO = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="familyCO")
dfMachineConfig = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="notOnMachine")
dfMaterialCO = pd.read_excel(os.path.join(glob.sPathToExcels, "03_co_setup_alternative.xlsx"), sheet_name="materialCO")
#fill WC List: orderNumber -> {material, quantity, priority}
for index, row in dfWCImport.iterrows():
    # stop after glob.iBreakImport orders to cap the problem size
    if index >= glob.iBreakImport:
        break
    dWcList[row.orderNumber]={}
    dWcList[row.orderNumber]['material'] = row.materialCode
    dWcList[row.orderNumber]['quantity'] = row.quantity
    dWcList[row.orderNumber]['priority'] = row.priority
#Create TimeMatrix dictionary from Query Results (family relation id -> changeover time)
for index, row in dfFamilyCO.iterrows():
    dFamilyCO[row.relID]= row['time']
    glob.lFamilyAtlas_0.append(row["familyAtlas"])
#Create materialFamily dictionary from Query Results (material -> {family, cycleTime})
for index, row in dfFamilies.iterrows():
    dMaterialFamily[row.Material] = {}
    dMaterialFamily[row.Material]['family'] = row.materialFamily
    dMaterialFamily[row.Material]['cycleTime'] = row.cycleTime
#Create MachineConfig >> ILLEGAL MACHINE CONFIG, machines the family is not allowed on
for index, row in dfMachineConfig.iterrows():
    # "notOnMachine" holds a comma-separated list of machine numbers
    dMachineConfig[row.family] = [int(x) for x in str(row.notOnMachine).split(",")]
#Create Material changeover time mapping (material relation -> changeover time)
for index, row in dfMaterialCO.iterrows():
    dMaterialCO[row.materialRel] = row["timeCO"]
    glob.lMaterialAtlas_0.append(row["materialAtlas"])
#open file to track usage history
filePopulationHistory = open(os.path.join(glob.sPathToExcels, "90_populationHistory.txt"), "w", encoding="utf-8")
fileFitnessHistory_runs = open(os.path.join(glob.sPathToExcels, "91_fitnessRuns.txt"), "a", encoding="utf-8")
######################################### 2 GA SETUP #########################################
# TO DO
# > use a more intelligent filling for initial population
### 2.1 Iterate over WC list and populate arrays
# initialize population randomly from list
lGenome = []
lPopulation = []
dPopulation = {}
lPopulation_names =[]
glob.lGenome_0 = []
# create original genome with all orders contained
for order in dWcList.keys():
    glob.lGenome_0.append(order)
# create list of 0s to fill fill the genome to a length of machines x genes to represent all possible machines in one genome
lEmptyAppend = [i*0 for i in range(0, (glob.iNumberMachines-1)*len(glob.lGenome_0))]
lGenome = glob.lGenome_0+lEmptyAppend
# from the filled Genome, create n = limPopulationSize initial parents
for i in range(0,glob.limPopulationSize):
    # udf_makeNewMember returns a shuffled genome plus its machine-break positions
    lNewMember, lNewBreaker = gak.udf_makeNewMember(glob.lGenome_0)
    gak.udf_listSortByBreak(lNewMember, lNewBreaker, 0)
    # populate the Population dictionary
    dPopulation["member"+str(i)] = {}
    dPopulation["member"+str(i)]["genome"] = lNewMember
    dPopulation["member"+str(i)]["breaker"] = lNewBreaker
# write the first population to the history file
filePopulationHistory.write("#"+str(iGenerationCount)+".1------------------------ Original Population ------------------------"+"\n")
# BUG FIX: the original looped over lPopulation / lPopulation_names, which are
# legacy parallel arrays that are never filled, so no members were ever written.
# Iterate the population dictionary that is actually populated above.
for sMemberName, dMember in dPopulation.items():
    filePopulationHistory.write(sMemberName+": "+str(dMember["genome"])+"\n")
######################################### 3 GA Algorithm #########################################
# ! Arrays ending on "_names" are parallel arrays to track member names
# iterate until break point reached (see below)
iBreakLoop = glob.iBreakGeneration
while iGenerationCount < iBreakLoop:
    fIllegalPerc = 0.0
    iGenerationCount += 1
    print("--------------------------------- GENERATION: "+str(iGenerationCount)+"---------------------------------")
    # execute function to calculate fitness of population
    # determine randomly if a cataclysm should occur; cataclsym = "kills off" the population and fills it with newly created one
    if random.uniform(0.0, 1.0) < glob.iCataclysmicProb and glob.bCataclysm == True:
        print("<<<<<<<<<<<<<<<<<<< CATACLYSM TIME <<<<<<<<<<<<<<<<<<<")
        dPopulation = gak.udf_cataclysm(dPopulation, glob.lGenome_0)
        # Add runs to the overall counter after cataclysm
        # halve the cataclysm probability so later cataclysms become rarer
        glob.iCataclysmicProb = glob.iCataclysmicProb/2
        iBreakLoop += glob.iBreakGeneration
    # calculte fitness for each member in the population
    lFitness, dMembers, lMinFitness, fMinFitness_run, fIllegalPerc = gak.udf_calcFitness3(dPopulation, dWcList, dMaterialFamily, dFamilyCO, dMaterialCO, lMinFitness, dMachineConfig, iGenerationCount)
    lFitness_history.append(fMinFitness_run)
    lIllegal_history.append(fIllegalPerc)
    # if the fitness is lower then the previous fintness lever, update the minimum fitness
    if lMinFitness[0] <= fMinFitness:
        fMinFitness = lMinFitness[0]
    # append calculated fitness for new lowest level
    lMinFitness_history.append(fMinFitness)
    # create table and calculate selection probabilities
    lFitness_sorted = gak.udf_sortByFitness(lFitness)
    # initialize population arrays
    lPopulation_new = []
    lPopulation_new_names = []
    dPopulation_new ={}
    # select parents randomly to form new population
    lPopulation_new, lPopulation_new_names, dPopulation_new = gak.udf_selectParentsFromPool(dMembers, lFitness_sorted, dPopulation)
    # Mating time - execute mating functions and initialize offspring arrays
    lPopulation_offspring = []
    lPopulation_offspring_names = []
    dPopulation_offspring ={}
    # lPopulation_offspring, glob.iChildCounter, lPopulation_offspring_names, dPopulation_offspring = gak.udf_matingPMX(lPopulation_new, glob.iChildCounter, lPopulation_new_names, dPopulation_new, dMembers, glob.fMutationRate)
    dPopulation_offspring = gak.udf_cloneMutate(dPopulation_new, dMembers, dMaterialFamily, dMachineConfig, dWcList, lGenome)
    # Mutating Time - execute swap-mutate function
    gak.udf_mutateSwap(glob.fMutationRate, dPopulation_offspring)
    # recreate the population array with the selected parents from previous iteration
    dPopulation={}
    for i,member in dPopulation_new.items():
        # avoid double entries, which are technically possible due to selection method
        if member["member"] not in dPopulation:
            dPopulation[member["member"]]={}
            dPopulation[member["member"]]["genome"]=member["genome"]
            dPopulation[member["member"]]["breaker"]=member["breaker"]
    # deconstruct newly created parent array and the mutated offspring array
    dPopulation = {**dPopulation, **dPopulation_offspring}
    # calculate starting point for trailing average
    iAvgStart = len(lMinFitness_history)-glob.iPastAverage
    if iAvgStart < 5:
        iAvgStart = 0
    # break the while loop if no lower fitness could be found for "iAvgStart" number of generations
    # (trailing average equals the current minimum only if the minimum never improved in that window)
    if sum(lMinFitness_history[(iAvgStart):(len(lMinFitness_history))])/((len(lMinFitness_history))-iAvgStart) == fMinFitness:
        break
# close file
filePopulationHistory.close()
# terminal end prints
print("===============================================================================================")
print("RESULT: ", lMinFitness[0])
print("MEMBER: ", lMinFitness[1])
print( lMinFitness[4])
print(np.corrcoef(lFitness_history, lIllegal_history)[1])
# print machines in termial
gak.udf_printMachinesCMD(lMinFitness[2], lMinFitness[3], lMinFitness[1])
print("__________________________________________")
# print machines with familes not materials
gak.udf_printMachinesFamCMD(lMinFitness[2], lMinFitness[3], lMinFitness[1], dMaterialFamily, dWcList)
######################################### 4 Graphing it #########################################
# set min and max for the y axes
# NOTE(review): all four limits derive from min(...) — presumably to zoom in
# near the best values; confirm max() was not intended for the upper bounds
y1Min = math.floor(min(lFitness_history)/1000)*1000
y1Max = math.ceil(min(lFitness_history)/1000)*2000
y2Min = math.floor(min(lIllegal_history))-0.1
y2Max = math.ceil(min(lIllegal_history))+0.1
# set parameters for saving the plot (file name encodes the run configuration)
dateTime = datetime.datetime.now()
iMilliseconds = int(round(dateTime.timestamp() * 1000))
sFileNamePlot = str(iMilliseconds)+"__RESULT_"+str(math.floor(lMinFitness[0]))+"__orders_"+str(len(glob.lGenome_0))+"--machines_"+str(glob.iNumberMachines)+"--Runs_"+str(glob.iBreakGeneration)+"--popSize_"+str(glob.limPopulationSize)+"--mut_"+str(glob.fMutationRate)+"--King_"+str(glob.bKingPrevails)+"--fAlloc_"+str(glob.iForceAllocation_G)+"--CAT_"+str(glob.bCataclysm)+"_"+str(glob.iCataclysmicProb)+"_"+str(glob.iDeletionProb)
sPlotPath = os.path.join(glob.sPathToExcels, "99_Output",sFileNamePlot+".png")
# create subplot
gFitness, ax1 = plt.subplots()
# set options: fitness on the left axis
color = "tab:blue"
ax1.set_ylabel("Fitness")
ax1.set_xlabel("Runs")
ax1.set_ylim(y1Min, y1Max)
ax1.plot(lFitness_history, color=color)
# create twin axis and set: illegal percentage on the right axis
ax2 = plt.twinx()
color ="tab:green"
ax2.set_ylabel("Illegal Percentage")
ax2.set_ylim(y2Min, y2Max)
ax2.plot(lIllegal_history, color=color, linestyle="--")
gFitness.tight_layout()
#save and plot
plt.savefig(sPlotPath)
plt.show()
# NOTE(review): fileFitnessHistory_runs is never closed — relies on interpreter exit
fileFitnessHistory_runs.write(str(lMinFitness[0])+"\n")
############################################## THE END ##############################################
19,374 | 2233232a31fb046186dbee11ace4ddcab15a02e6 |
#-------------
# Print every line of ./1.txt with surrounding whitespace stripped.
# Fixed: Python 2 `print` statement (a SyntaxError under Python 3) and the
# never-closed file handle — use a context manager and the print() function.
with open('./1.txt', 'r') as f:
    for line in f:
        print(line.strip())
19,375 | dc4565837fb1cff2533ae841a29a72c4c847ebb6 | """
Tests if websocket connection ~ collects data
"""
import asyncio
import json
import websockets
async def hello():
    """
    Subscribe to the Coinbase Pro websocket 'full' channel for ETH-USD and
    stream every received message to res.json (one message per line),
    stopping after ~2 million messages.
    """
    uri = "wss://ws-feed.pro.coinbase.com/"
    async with websockets.connect(uri) as websocket:
        print('before send')
        # subscription request in the Coinbase Pro websocket protocol format
        sub = {
            "type": "subscribe",
            "product_ids": [
                #"USD-BTC",
                "ETH-USD"
            ],
            "channels": [
                "full",
            ]
        }
        await websocket.send(json.dumps(sub))
        x = 1
        with open("res.json", "w") as file:
            # receive, echo, and persist messages until the counter hits 2,000,000
            while x < 2000000:
                x += 1
                greeting = await websocket.recv()
                print(x)
                print(greeting)
                file.write(greeting)
                file.write("\n")
asyncio.get_event_loop().run_until_complete(hello()) |
19,376 | 0ec8f67511c2790e73ad7dded9546de605ef595c | from django.shortcuts import render, redirect
from django.contrib.auth import authenticate, login
from django.views.decorators.csrf import csrf_protect
from django.views.decorators.csrf import csrf_exempt
from django.contrib import messages
from django.contrib.auth.models import User
from M3DB.models import *
@csrf_exempt
def main(request):
    """
    Login view: authenticate credentials submitted via POST and redirect to
    the requested 'next' page (defaulting to 'viewProject'); on GET or a
    failed/inactive login, re-render the login form with what was typed.
    """
    username = ''
    password = ''
    firstname = ''
    # redirect target: query string first; POST may override it below
    mynext = request.GET.get('next') or None
    if mynext is None:
        mynext = 'viewProject'
    if request.POST:
        username = request.POST.get('username')
        password = request.POST.get('password')
        mynext = request.POST.get('next') or None
        user = authenticate(username=username, password=password)
        if user is not None and user.is_active:
            login(request, user)
            return redirect(mynext)
    # GET request, or authentication failed / account inactive
    return render(request, 'login.html', {
        'username': username,
        'firstname': firstname,
        'next': mynext,
    })
|
19,377 | 39d86ac06718ba63bc436f900710d15a43f67c20 | #*******************************************************#
'''
Program to implement watershed algorithm step by step to perfom image segmentation
of the brain MRI scan image.
'''
#*******************************************************#
''' Description
- Grayscale images seen as topographic surface
- High intensity = peaks and hills
- Low intensity = valley
- Fill isolated valleys - local minima with colored water (labels)
- With water rising, peaks (gradient) with different colors tends to merge
- This approach gives oversegmented results due to noise or other irregularities in the image
- So in opencv, we have an approach based on marker watershed algorithm where we can specify
which are valley poiints to be merged.
- It is an interactive image segmentation
'''
import cv2
import numpy as np
import matplotlib.pyplot as plt
def watershed(image):
    """
    Run the full marker-based watershed segmentation pipeline on the image
    at path *image* and return every intermediate stage.

    Returns (in order): gray image, median-blurred image, sobel edge image,
    binary threshold, morphological opening, sure background, sure
    foreground, unknown region mask, watershed marker array, and the input
    image with watershed boundaries painted magenta.
    """
    ###################PREPROCESSING
    #Read the image and convert to gray scale
    img = cv2.imread(image)
    img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    #smoothing the image
    img_median = cv2.medianBlur(img_gray,5)
    #edge detection using sobelX, sobelY
    img_sobelx = cv2.Sobel(img_median,cv2.CV_8U,dx=1,dy=0,ksize=3)
    img_sobely = cv2.Sobel(img_median,cv2.CV_8U,dx=0,dy=1,ksize=3)
    # combine both gradients with the gray image before thresholding
    img_sobel = img_sobelx+img_sobely+img_gray
    ret, th1 = cv2.threshold(img_sobel,55,255,cv2.THRESH_BINARY)
    #having foreground and background image
    kernel = np.ones((3,3),np.uint8)
    # To remove any small white noises in the image using morphological opening
    opening = cv2.morphologyEx(th1, cv2.MORPH_OPEN,kernel,iterations=2)
    #background
    # Black region shows sure background area
    # Dilation increases object boundary to background.
    sure_bg = cv2.dilate(opening,kernel,iterations=3)
    #white region shows sure foreground region
    dist = cv2.distanceTransform(opening,distanceType=cv2.DIST_L2, maskSize=5)
    ret,sure_fg = cv2.threshold(dist, 0.2*dist.max(),255,0) #threshold value needs to change if tumor is not segmented
    # Identifying regions where we don't know whether foreground and background
    sure_fg = np.uint8(sure_fg)
    unknown = cv2.subtract(sure_bg,sure_fg)
    #######Watershed algorithm
    contours, hierarchy = cv2.findContours(sure_fg,cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
    #numpy array for markers
    marker = np.zeros((img_gray.shape[0],img_gray.shape[1]),dtype=np.int32)
    marker = np.int32(sure_fg)+np.int32(sure_bg)
    #marker labeling: each foreground contour gets its own label (>= 2)
    for i in range(len(contours)):
        cv2.drawContours(marker,contours,i,i+2,-1)
    marker = marker + 1 #to make sure background is not black 0
    marker[unknown==255] = 0 #unknown regions as 0
    copy_img = img.copy()
    cv2.watershed(copy_img,marker)
    # watershed writes -1 on region boundaries; paint them magenta
    copy_img[marker == -1] = [255,0,255]
    return img_gray,img_median, img_sobel, th1,opening,sure_bg,sure_fg,unknown,marker,copy_img
### Image data
image_path = 'image_tumor.jpg'
# PERFORMANCE FIX: the original called watershed(image_path) ten times (once
# per returned element), re-running the entire segmentation pipeline for each
# stage. Run it once and unpack all ten intermediate results.
(tumor, img_median, img_sobel, threshold, opening,
 sure_bg, sure_fg, unknown, marker, img_mapped) = watershed(image_path)
#####################################
## Plotting all stages of watershed algorithm ##
#####################################
fig, axs = plt.subplots(2,5, figsize=(18,6), facecolor='w', edgecolor='k')
fig.subplots_adjust(hspace = .25, wspace=.5)
axs = axs.ravel()
axs[0].imshow(tumor,'gray')
axs[0].set_title('1. Orginial Gray Image')
axs[1].imshow(img_median,'gray')
axs[1].set_title('2. After Smoothing')
axs[2].imshow(img_sobel,'gray')
axs[2].set_title('3. After Edge Detection')
axs[3].imshow(threshold,'gray')
axs[3].set_title('4. After Thresholding')
axs[4].imshow(opening,'gray')
axs[4].set_title('5. Removing small white noises')
axs[5].imshow(sure_bg,'gray')
axs[5].set_title('6. Background')
axs[6].imshow(sure_fg,'gray')
axs[6].set_title('7. Foreground')
axs[7].imshow(unknown,'gray')
axs[7].set_title('8. Unknown regions')
# keep the imshow handle so the colorbar does not re-render the marker image
marker_image = axs[8].imshow(marker)
axs[8].set_title('9. Watershed Algorithm')
axs[9].imshow(img_mapped)
axs[9].set_title('10. Final Mapping')
fig.colorbar(marker_image,ax=axs[8])
plt.show()
19,378 | ccf5b0dec5c50e9aaae88969db2ec830ec0707ce |
import torch
import numpy as np
import h5py
from allennlp.models.archival import load_archive
from kb.common import JsonFile
# includes @@PADDING@@, @@UNKNOWN@@, @@MASK@@, @@NULL@@
NUM_EMBEDDINGS = 117663
def generate_wordnet_synset_vocab(entity_file, vocab_file):
    """
    Build the synset vocabulary from *entity_file* (jsonl of nodes) and
    write it to *vocab_file*, one token per line: '@@UNKNOWN@@' first,
    then every 'synset' node id in file order, then '@@MASK@@' and
    '@@NULL@@'.
    """
    entries = ['@@UNKNOWN@@']
    with JsonFile(entity_file, 'r') as fin:
        entries.extend(node['id'] for node in fin if node['type'] == 'synset')
    entries.append('@@MASK@@')
    entries.append('@@NULL@@')
    with open(vocab_file, 'w') as fout:
        fout.write('\n'.join(entries))
def extract_tucker_embeddings(tucker_archive, vocab_file, tucker_hdf5):
    """
    Pull the entity embedding matrix out of a trained TuckER model archive
    and write it to *tucker_hdf5* under the 'tucker' dataset key,
    re-indexed to match *vocab_file* with a padding row left at index 0.
    """
    archive = load_archive(tucker_archive)
    with open(vocab_file, 'r') as fin:
        vocab_list = fin.read().strip().split('\n')
    # get embeddings
    embed = archive.model.kg_tuple_predictor.entities.weight.detach().numpy()
    # row 0 stays all zeros (padding); vocab_list[k] is written to row k + 1
    out_embeddings = np.zeros((NUM_EMBEDDINGS, embed.shape[1]))
    vocab = archive.model.vocab
    for k, entity in enumerate(vocab_list):
        embed_id = vocab.get_token_index(entity, 'entity')
        if entity in ('@@MASK@@', '@@NULL@@'):
            # these aren't in the tucker vocab -> random init
            out_embeddings[k + 1, :] = np.random.randn(1, embed.shape[1]) * 0.004
        elif entity != '@@UNKNOWN@@':
            assert embed_id != 1
            # k = 0 is @@UNKNOWN@@, and want it at index 1 in output
            out_embeddings[k + 1, :] = embed[embed_id, :]
    # write out to file
    with h5py.File(tucker_hdf5, 'w') as fout:
        ds = fout.create_dataset('tucker', data=out_embeddings)
def get_gensen_synset_definitions(entity_file, vocab_file, gensen_file):
    """
    Encode every synset definition with GenSen and write the resulting
    (NUM_EMBEDDINGS, 2048) float32 matrix to *gensen_file* under the
    'gensen' dataset key.

    entity_file: jsonl file of nodes; 'synset' nodes carry a 'definition'.
    vocab_file: newline-separated vocabulary from generate_wordnet_synset_vocab.
    """
    import json  # BUG FIX: json.loads below was used without json being imported
    from gensen import GenSen, GenSenSingle
    gensen_1 = GenSenSingle(
        model_folder='./data/models',
        filename_prefix='nli_large_bothskip',
        pretrained_emb='./data/embedding/glove.840B.300d.h5'
    )
    gensen_1.eval()
    # synset id -> definition text
    definitions = {}
    with open(entity_file, 'r') as fin:
        for line in fin:
            node = json.loads(line)
            if node['type'] == 'synset':
                definitions[node['id']] = node['definition']
    with open(vocab_file, 'r') as fin:
        vocab_list = fin.read().strip().split('\n')
    # get the descriptions; row 0 is padding and stays the empty string
    sentences = [''] * NUM_EMBEDDINGS
    for k, entity in enumerate(vocab_list):
        definition = definitions.get(entity)
        if definition is None:
            # only the special tokens are allowed to lack a definition
            assert entity in ('@@UNKNOWN@@', '@@MASK@@', '@@NULL@@')
        else:
            # vocab_list[0] ('@@UNKNOWN@@') maps to row 1, hence k + 1
            sentences[k + 1] = definition
    embeddings = np.zeros((NUM_EMBEDDINGS, 2048), dtype=np.float32)
    # encode the definitions in batches of 32
    for k in range(0, NUM_EMBEDDINGS, 32):
        sents = sentences[k:(k+32)]
        reps_h, reps_h_t = gensen_1.get_representation(
            sents, pool='last', return_numpy=True, tokenize=True
        )
        embeddings[k:(k+32), :] = reps_h_t
        print(k)
    with h5py.File(gensen_file, 'w') as fout:
        ds = fout.create_dataset('gensen', data=embeddings)
def combine_tucker_gensen(tucker_hdf5, gensen_hdf5, all_file):
    """
    Concatenate the tucker and gensen embedding matrices column-wise and
    write the float32 result to *all_file* under 'tucker_gensen'.
    """
    with h5py.File(tucker_hdf5, 'r') as fin:
        tucker_embed = fin['tucker'][...]
    with h5py.File(gensen_hdf5, 'r') as fin:
        gensen_embed = fin['gensen'][...]
    combined = np.concatenate([tucker_embed, gensen_embed], axis=1).astype(np.float32)
    with h5py.File(all_file, 'w') as fout:
        fout.create_dataset('tucker_gensen', data=combined)
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser()
    # the store_true flags below select exactly one of four processing modes
    parser.add_argument('--generate_wordnet_synset_vocab', default=False, action="store_true")
    parser.add_argument('--entity_file', type=str)
    parser.add_argument('--vocab_file', type=str)
    parser.add_argument('--generate_gensen_embeddings', default=False, action="store_true")
    parser.add_argument('--gensen_file', type=str)
    parser.add_argument('--extract_tucker', default=False, action="store_true")
    parser.add_argument('--tucker_archive_file', type=str)
    parser.add_argument('--tucker_hdf5_file', type=str)
    parser.add_argument('--combine_tucker_gensen', default=False, action="store_true")
    parser.add_argument('--all_embeddings_file', type=str)
    args = parser.parse_args()
    # dispatch on the first mode flag that is set; no flag is an error
    if args.generate_wordnet_synset_vocab:
        generate_wordnet_synset_vocab(args.entity_file, args.vocab_file)
    elif args.generate_gensen_embeddings:
        get_gensen_synset_definitions(args.entity_file, args.vocab_file, args.gensen_file)
    elif args.extract_tucker:
        extract_tucker_embeddings(args.tucker_archive_file, args.vocab_file, args.tucker_hdf5_file)
    elif args.combine_tucker_gensen:
        combine_tucker_gensen(args.tucker_hdf5_file, args.gensen_file, args.all_embeddings_file)
    else:
        raise ValueError
19,379 | beb4418151e0e65cea79107b77fae9985ce98658 | from django.contrib import admin
from django.urls import path
from . import views
from django.contrib.staticfiles.urls import static
from task import settings
# URL routes for the app, plus media-file serving under MEDIA_URL.
urlpatterns = [
    path('', views.home,name='home'),
    path("search/", views.search, name="Search"),
    path('login/', views.handleLogin,name='login'),
    path('logout/', views.handleLogout,name='logout'),
    path('signup/', views.signup,name='signup'),
]+static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
19,380 | c6a41e18a5a45657a9b9bc6fa064ad3cca05832f | # Author: Michael Reilly
# Purpose of Program: To process .csv files and form triads based on the opinions of epinion users.
# The program accepts a prompted filename from the user as input, as can be seen by how to run from the terminal window.
# Then it finds all self-loops in the triads and counts them, but does not add them to the triad analysis.
# Since this will use networkx, that should not be a problem.
# Count the number of positive and negative reviews to create an expected distributions of four triad types for comparison to the actual distribution.
# For expected distribution assume positive and negative trust values are randomly assigned.
# Identify the triads in the graph, for each triad, and must know value of edges in the triangle formed by the three nodes.
# Identify which of the four triad types it represents and add to appropriate count.
# Output will contain:
# 1. Number of edges in the network
# 2. Number of self-loops
# 3. Number of edges used to identify triads (TotEdges)[edges - self-loops]
# 4. Number of positive edges (ignore self-loops)
# 5. Number of negative edges (ignore self-loops)
# 6. Probability p that an edge will be positive: (number of positive edges)/TotEdges
# 7. Probability that an edge will be negative: 1-p
# 8. Expected distribution of triad types (based on p and 1-p applied to the number of triangles in the graph). Show number and percent.
# a. Trust-Trust-Trust b. Trust-Trust-Distrust c. Trust-Distrust-Distrust d. Distrust-Distrust-Distrust e. Total
# 9. Actual distribution of triad types. Show number and percent.
# a. Trust-Trust-Trust b. Trust-Trust-Distrust c. Trust-Distrust-Distrust d. Distrust-Distrust-Distrust e. Total
#
# The implementation I did using networkx was unable to do the epinions.csv file, so the only outputs are epinion96.csv and epionion_small.csv files
# To run from terminal window: python3 Reilly_HW5.py filename
import networkx as nx
from itertools import combinations as comb
import argparse
# function triad_processing retrieves the data of the .csv file and processes all the possible triads.
def triad_processing(f_name):
    """
    Read a signed trust network from the CSV file *f_name* (rows of
    "reviewer,reviewee,weight", weight 1 = trust, -1 = distrust) and print
    the expected vs. actual distribution of the four signed triad types
    (TTT, TTD, TDD, DDD), as specified in the file header.

    Bug fixes vs. the original implementation, per the spec comments:
      * trust/distrust edge counts now ignore self-loops (spec items 4, 5);
      * the probabilities p and 1-p are computed over TotEdges
        (edges minus self-loops) instead of all edges (spec items 6, 7).
    """
    # counters for signed edges and self-loops
    num_of_positives = 0
    num_of_negatives = 0
    num_of_self_loops = 0
    # undirected graph holding all reviewers/reviewees and their signed edges
    Graph = nx.Graph()
    # reads the csv file row by row and collects the data
    with open(f_name, "r") as triads_file:
        for row in triads_file:
            # each row: reviewer id, reviewee id, signed weight
            reviewer, reviewee, weight = list(map(int, row.split(",")))
            Graph.add_node(reviewer)
            Graph.add_node(reviewee)
            Graph.add_edge(reviewer, reviewee, weight=weight)
            # self_loop counter
            if reviewer == reviewee:
                num_of_self_loops += 1
    # count trust/distrust edges, skipping self-loops as the spec requires
    for (node_a, node_b, attrs) in Graph.edges(data=True):
        if node_a == node_b:
            continue
        if attrs["weight"] == 1:
            num_of_positives += 1
        elif attrs["weight"] == -1:
            num_of_negatives += 1
    # built-in function in networkx to count the number of edges
    num_of_edges = Graph.number_of_edges()
    print("\nEdges in network: "+str(num_of_edges))
    print("Self-loops: "+str(num_of_self_loops))
    # TotEdges=(all edges-self loops)
    TotEdges = (num_of_edges-num_of_self_loops)
    print("Edges used - TotEdges: "+str(TotEdges))
    print("trust edges: "+str(num_of_positives))
    # BUG FIX: P(Trust)=Positives/TotEdges (the original divided by all edges)
    p_positive = (num_of_positives/TotEdges)
    print("probability p: {:.2f}".format(p_positive))
    print("distrust edges: "+str(num_of_negatives))
    # P(Distrust)=Negatives/TotEdges
    p_negative = (num_of_negatives/TotEdges)
    print("probability 1-p: {:.2f}".format(p_negative))
    # nx.triangles counts each triangle once per corner node, so divide by 3
    trianglex3 = nx.triangles(Graph)
    num_of_triangles = sum(trianglex3.values())/3
    print("Triangles: {:.0f}".format(num_of_triangles))
    print("\nExpected Distribution")
    print("Type\tpercent\tnumber")
    # expected shares assume edge signs are independent with probability p;
    # the mixed types (TTD, TDD) each have three orderings, hence the *3
    TTT_percent = (p_positive*p_positive*p_positive)
    TTT_number = (num_of_triangles*TTT_percent)
    print("TTT\t{:.1f}".format(TTT_percent*100)+"\t{:.1f}".format(TTT_number))
    TTD_percent = ((p_positive*p_positive*p_negative)*3)
    TTD_number = (num_of_triangles*TTD_percent)
    print("TTD\t{:.1f}".format(TTD_percent*100)+"\t{:.1f}".format(TTD_number))
    TDD_percent = ((p_positive*p_negative*p_negative)*3)
    TDD_number = (num_of_triangles*TDD_percent)
    print("TDD\t{:.1f}".format(TDD_percent*100)+"\t{:.1f}".format(TDD_number))
    DDD_percent = (p_negative*p_negative*p_negative)
    DDD_number = (num_of_triangles*DDD_percent)
    print("DDD\t{:.1f}".format(DDD_percent*100)+"\t{:.1f}".format(DDD_number))
    # Expected totals
    Total_percent = (TTT_percent+TTD_percent+TDD_percent+DDD_percent)
    Total_number = (round(TTT_number,1)+round(TTD_number,1)+round(TDD_number,1)+round(DDD_number,1))
    print("Total\t{:.0f}".format(Total_percent*100)+"\t{:.1f}".format(Total_number))
    # get_edge_attributes maps (u, v) edge tuples to their 'weight' value
    weight = nx.get_edge_attributes(Graph, 'weight')
    # all cliques of size 3 are exactly the triads
    all_triads = [i for i in nx.enumerate_all_cliques(Graph) if(len(i) == 3)]
    # per triad, list its three (edge, weight) pairs
    list_of_triads = list(map(lambda i: list(map(lambda i: (i, weight[i]), comb(i, 2))), all_triads))
    # classify each triangle by the sum of its three edge signs:
    # 3 -> TTT, 1 -> TTD, -1 -> TDD, -3 -> DDD
    sign_counts = {3: 0, 1: 0, -1: 0, -3: 0}
    for triad in list_of_triads:
        sign_sum = triad[0][1] + triad[1][1] + triad[2][1]
        sign_counts[sign_sum] += 1
    num_of_TTT = sign_counts[3]
    num_of_TTD = sign_counts[1]
    num_of_TDD = sign_counts[-1]
    num_of_DDD = sign_counts[-3]
    print("\nActual Distribution")
    print("Type\tpercent\tnumber")
    # each percentage is that type's count divided by the triangle total
    percent_of_TTT = (num_of_TTT/num_of_triangles)
    print("TTT\t{:.1f}".format(percent_of_TTT*100)+"\t"+str(num_of_TTT))
    percent_of_TTD = (num_of_TTD/num_of_triangles)
    print("TTD\t{:.1f}".format(percent_of_TTD*100)+"\t"+str(num_of_TTD))
    percent_of_TDD = (num_of_TDD/num_of_triangles)
    print("TDD\t{:.1f}".format(percent_of_TDD*100)+"\t"+str(num_of_TDD))
    percent_of_DDD = (num_of_DDD/num_of_triangles)
    print("DDD\t{:.1f}".format(percent_of_DDD*100)+"\t"+str(num_of_DDD))
    # Totals are just the sums of the four components
    percent_of_Total = (percent_of_TTT+percent_of_TTD+percent_of_TDD+percent_of_DDD)
    num_of_Total = (num_of_TTT+num_of_TTD+num_of_TDD+num_of_DDD)
    print("Total\t{:.0f}".format(percent_of_Total*100)+"\t"+str(num_of_Total))
# main routine
if __name__ == "__main__":
    # argparse will parse the arguments passed in through the command line
    parser = argparse.ArgumentParser(description="Triad Data Collection from epinions.")
    # BUG FIX: a positional argument ignores `default` unless it is optional;
    # nargs="?" makes the fake.csv fallback actually take effect when no
    # filename is supplied (backward compatible when one is).
    parser.add_argument("filename", nargs="?", default="fake.csv")
    # parse_args on parser to get a usable version of the results
    fileName = parser.parse_args()
    # the function to start using the data
    triad_processing(fileName.filename)
|
19,381 | eb6cea765d088de2a5aefe9a4b509cbdcfaaf982 | # -*- coding: utf-8 -*-
import codecs
import json
from draftjs_exporter.constants import BLOCK_TYPES, ENTITY_TYPES, INLINE_STYLES
from draftjs_exporter.defaults import BLOCK_MAP, STYLE_MAP
from draftjs_exporter.dom import DOM
from draftjs_exporter.html import HTML
from draftjs_exporter.types import Element, Props
def image(props: Props) -> Element:
    """Render an IMAGE entity as an <img> element, forwarding its attributes."""
    attributes = {key: props.get(key) for key in ('src', 'width', 'height', 'alt')}
    return DOM.create_element('img', attributes)
def link(props: Props) -> Element:
    """Render a LINK entity as an <a> element wrapping its children."""
    attributes = {'href': props['url']}
    return DOM.create_element('a', attributes, props['children'])
# Sample Draft.js ContentState: a single header-two block whose "Draft.js"
# substring (offset 41, length 8) is linked through entity "0".
content_state = {
    "entityMap": {
        "0": {
            "type": "LINK",
            "mutability": "MUTABLE",
            "data": {
                "url": "https://github.com/facebook/draft-js"
            }
        },
    },
    "blocks": [
        {
            "key": "b0ei9",
            "text": "draftjs_exporter is an HTML exporter for Draft.js content",
            "type": "header-two",
            "depth": 0,
            "inlineStyleRanges": [],
            "entityRanges": [
                {
                    "offset": 41,
                    "length": 8,
                    "key": 0
                }
            ],
            "data": {}
        },
    ]
}
# Demo content from https://github.com/springload/draftjs_exporter/blob/master/example.py.
# with open('docs/example.json') as example:
# content_state = json.load(example)
if __name__ == '__main__':
    # Build the exporter with the default block/style maps and a decorator
    # per entity type.
    exporter = HTML({
        'block_map': BLOCK_MAP,
        'style_map': STYLE_MAP,
        'entity_decorators': {
            # Map entities to components so they can be rendered with their data.
            ENTITY_TYPES.IMAGE: image,
            ENTITY_TYPES.LINK: link,
            # Lambdas work too.
            ENTITY_TYPES.HORIZONTAL_RULE: lambda props: DOM.create_element('hr'),
            # Discard those entities.
            ENTITY_TYPES.EMBED: None,
        },
        # NOTE(review): dotted path to a custom rendering engine, presumably
        # from a draftjs_exporter_rust_engine package — confirm it is installed.
        'engine': 'draftjs_exporter_rust_engine.engine.DOMString',
    })
    markup = exporter.render(content_state)
    print(markup)
    # Output to a Markdown file to showcase the output in GitHub (and see changes in git).
    with codecs.open('docs/example.txt', 'w', 'utf-8') as file:
        file.write(
            """# Example output (generated by [`example.py`](../example.py))
---
{markdown}---
""".format(markdown=markup))
    # NOTE(review): smoke test of a separate native extension; unrelated to
    # the export above.
    import string_engine
    print(string_engine.sum_as_string(4, 3))
|
19,382 | f10507c4d1397839f7b2b603923b52d19f0bddd5 | import torch
import torch.autograd as autograd
import torchvision.datasets as datasets
import torchvision.transforms as transforms
def dataloader(root, train_batch_size, test_batch_size, num_workers):
    """Build CIFAR-10 train/test DataLoaders with light augmentation.

    Args:
        root: dataset directory (downloaded into on first use).
        train_batch_size: batch size for the (shuffled) training loader.
        test_batch_size: batch size for the test loader.
        num_workers: worker processes per DataLoader.

    Returns:
        (trainloader, testloader) tuple.
    """
    transform_train = transforms.Compose([
        transforms.RandomResizedCrop(32, scale=(0.7, 1.0), ratio=(1.0, 1.0)),
        # BUGFIX: the jitter strengths were 0.1*torch.randn(1), which is
        # negative roughly half the time and makes ColorJitter raise
        # ValueError (brightness/contrast/saturation must be non-negative,
        # hue must lie in [-0.5, 0.5]). Fixed strengths of 0.1 keep the
        # intended jitter magnitude; ColorJitter already randomises the
        # actual factor per sample.
        transforms.ColorJitter(
            brightness=0.1,
            contrast=0.1,
            saturation=0.1,
            hue=0.1),
        transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    transform_test = transforms.Compose([
        transforms.CenterCrop(32),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    trainset = datasets.CIFAR10(root=root, train=True, transform=transform_train, download=True)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=train_batch_size, shuffle=True, num_workers=num_workers)
    # download=False: the train split above already fetched the archive.
    testset = datasets.CIFAR10(root=root, train=False, transform=transform_test, download=False)
    testloader = torch.utils.data.DataLoader(testset, batch_size=test_batch_size, shuffle=False, num_workers=num_workers)
    return trainloader, testloader
def calc_gradient_penalty(batch_size, netD, real_data, fake_data):
    """WGAN-GP gradient penalty, computed on CUDA.

    Interpolates uniformly between real and fake samples and penalises the
    critic's gradient norm for deviating from 1.

    NOTE(review): assumes netD returns a (scores, aux) tuple and that
    real_data/fake_data hold batch_size RGB 32x32 images — confirm with caller.
    """
    DIM = 32     # image side length used for the (B, 3, DIM, DIM) reshape
    LAMBDA = 10  # penalty weight
    # One interpolation coefficient per sample, broadcast over all pixels.
    alpha = torch.rand(batch_size, 1)
    alpha = alpha.expand(batch_size, int(real_data.nelement()/batch_size)).contiguous()
    alpha = alpha.view(batch_size, 3, DIM, DIM)
    alpha = alpha.cuda()
    fake_data = fake_data.view(batch_size, 3, DIM, DIM)
    # Points on the line segments between real and fake samples; detach so
    # gradients flow only through the interpolates, not the inputs.
    interpolates = alpha * real_data.detach() + ((1 - alpha) * fake_data.detach())
    interpolates = interpolates.cuda()
    interpolates.requires_grad_(True)
    disc_interpolates, _ = netD(interpolates)
    # d(critic score)/d(interpolates); create_graph=True keeps the penalty
    # itself differentiable for the critic update.
    gradients = autograd.grad(outputs=disc_interpolates, inputs=interpolates,
                              grad_outputs=torch.ones(disc_interpolates.size()).cuda(),
                              create_graph=True, retain_graph=True, only_inputs=True)[0]
    gradients = gradients.view(gradients.size(0), -1)
    # Two-sided penalty: (||grad||_2 - 1)^2, averaged over the batch.
    gradient_penalty = ((gradients.norm(2, dim=1) - 1) ** 2).mean() * LAMBDA
    return gradient_penalty
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import os
def plot(samples):
    """Lay the given image samples out on a tight 10x10 grid; return the figure."""
    fig = plt.figure(figsize=(10, 10))
    grid = gridspec.GridSpec(10, 10)
    grid.update(wspace=0.02, hspace=0.02)
    for idx, img in enumerate(samples):
        cell = plt.subplot(grid[idx])
        plt.axis('off')
        cell.set_xticklabels([])
        cell.set_yticklabels([])
        cell.set_aspect('equal')
        plt.imshow(img)
    return fig
class TreeNode:
    """Binary-tree node holding a value and optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
# class Solution:
# def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
#
# if not p and not q:
# return True
#
# if (p and not q) or (not p and q):
# return False
#
# if (not p.left and not p.right) and (not q.left and not q.right):
# return p.val == q.val
#
# if ((p.left and not q.left) or (not p.left and q.left)) or (
# (p.right and not q.right) or (not p.right and q.right)):
# return False
# return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
class Solution:
    def isSameTree(self, p: TreeNode, q: TreeNode) -> bool:
        """Return True iff p and q have identical structure and values."""
        if p is None or q is None:
            # Equal only when both subtrees are absent.
            return p is q
        if p.val != q.val:
            return False
        return self.isSameTree(p.left, q.left) and self.isSameTree(p.right, q.right)
# [1, null, 2, 4, null, null, 3]
# [1, null, 4, 2, null, null, 3]
if __name__ == '__main__':
    # The trees differ in the right child's value (2 vs 4), so this prints False.
    t1 = TreeNode(1, right=TreeNode(2, left=TreeNode(2, right=TreeNode(3))))
    t2 = TreeNode(1, right=TreeNode(4, left=TreeNode(4, right=TreeNode(3))))
    print(Solution().isSameTree(t1, t2))
|
19,384 | 0efca8da31999a9110a835ff38e1f7e60f18fee0 | from django.apps import AppConfig
class BasicNotificationsConfig(AppConfig):
    """Django application configuration for the basic_notifications app."""
    name = 'basic_notifications'
|
19,385 | d4ad2b20c186fbab8ca756fbcec14bb855b4a723 | # -*- coding: utf-8 -*-
"""
Created on Thu Oct 11 13:13:45 2018
@author: L1817
"""
import argparse
import pickle
import numpy as np
import matplotlib.pyplot as plt
import time
import os
import preprocess_data, utilities, hydro, hydro_utils, read
plt.close("all")
"""
Parse command-line arguments
"""
parser = argparse.ArgumentParser(description='Run hydro without any optimization.')
# BUGFIX: the help strings previously advertised Default=10/5/10, which did
# not match the actual defaults (3/0/1); corrected to the real values.
parser.add_argument('-d', '--days', default=3, type=int,
                    help='(int) Number of outermost iterations of the fipy solver, be it steadystate or transient. Default=3.')
parser.add_argument('-b', '--nblocks', default=0, type=int,
                    help='(int) Number of blocks to locate. Default=0.')
parser.add_argument('-n', '--niter', default=1, type=int,
                    help='(int) Number of repetitions of the whole computation. Default=1')
args = parser.parse_args()
DAYS = args.days
N_BLOCKS = args.nblocks
N_ITER = args.niter
"""
Read and preprocess data
"""
preprocessed_datafolder = r"data/Strat4"
dem_rst_fn = preprocessed_datafolder + r"/DTM_metres_clip.tif"
can_rst_fn = preprocessed_datafolder + r"/canals_clip.tif"
peat_depth_rst_fn = preprocessed_datafolder + r"/Peattypedepth_clip.tif" # peat depth, peat type in the same raster
abs_path_data = os.path.abspath('./data') # Absolute path to data folder needed for Excel file with parameters
params_fn = abs_path_data + r"/params.xlsx"
# Read rasters, build up canal connectivity adjacency matrix.
# Recompute only when any of the cached globals is missing (first run in an
# interactive session). BUGFIX: the original test
# `'CNM' and 'cr' and 'c_to_r_list' not in globals()` only checked the last
# name, because the non-empty string literals are always truthy.
if any(name not in globals() for name in ('CNM', 'cr', 'c_to_r_list')):
    CNM, cr, c_to_r_list = preprocess_data.gen_can_matrix_and_raster_from_raster(can_rst_fn=can_rst_fn, dem_rst_fn=dem_rst_fn)
_, dem, peat_type_arr, peat_depth_arr = preprocess_data.read_preprocess_rasters(can_rst_fn, dem_rst_fn, peat_depth_rst_fn, peat_depth_rst_fn)
# Read parameters
PARAMS_df = preprocess_data.read_params(params_fn)
BLOCK_HEIGHT = PARAMS_df.block_height[0]; CANAL_WATER_LEVEL = PARAMS_df.canal_water_level[0]
DIRI_BC = PARAMS_df.diri_bc[0]; HINI = PARAMS_df.hini[0];
ET = PARAMS_df.ET[0]; TIMESTEP = PARAMS_df.timeStep[0]; KADJUST = PARAMS_df.Kadjust[0]
P = read.read_precipitation() # precipitation read from separate historical data
ET = ET * np.ones(shape=P.shape)
# Even if maps say peat depth is less than 2 meters, the impermeable bottom is at most at 2m.
# This can potentially break the hydrological simulation if the WTD would go below 2m.
print(">>>>> WARNING, OVERWRITING PEAT DEPTH")
peat_depth_arr[peat_depth_arr < 2.] = 2.
# catchment mask: delimit the study area
catchment_mask = np.ones(shape=dem.shape, dtype=bool)
catchment_mask[np.where(dem<-10)] = False # -99999.0 is current value of dem for nodata points.
# 'peel' the dem. Dirichlet BC will be applied at the peel.
boundary_mask = utilities.peel_raster(dem, catchment_mask)
# after peeling, catchment_mask should only be the fruit:
catchment_mask[boundary_mask] = False
# soil types, soil physical properties and soil depth:
peat_type_masked = peat_type_arr * catchment_mask
peat_bottom_elevation = - peat_depth_arr * catchment_mask # meters with respect to dem surface. Should be negative!
# Load peatmap soil types' physical properties dictionary.
# Kadjust is hydraulic conductivity multiplier for sapric peat
h_to_tra_and_C_dict, K = hydro_utils.peat_map_interp_functions(Kadjust=KADJUST)
# Transmissivity and storage are computed as: T(h) = T(h) - T(peat depth).
# These quantities are the latter
tra_to_cut = hydro_utils.peat_map_h_to_tra(soil_type_mask=peat_type_masked,
gwt=peat_bottom_elevation, h_to_tra_and_C_dict=h_to_tra_and_C_dict)
sto_to_cut = hydro_utils.peat_map_h_to_sto(soil_type_mask=peat_type_masked,
gwt=peat_bottom_elevation, h_to_tra_and_C_dict=h_to_tra_and_C_dict)
sto_to_cut = sto_to_cut * catchment_mask.ravel()
# Water level in canals and list of pixels in canal network.
srfcanlist =[dem[coords] for coords in c_to_r_list]
n_canals = len(c_to_r_list)
oWTcanlist = [x - CANAL_WATER_LEVEL for x in srfcanlist]
hand_made_dams = True # compute performance of cherry-picked locations for dams.
"""
MonteCarlo
"""
for i in range(0,N_ITER):
if i==0: # random block configurations
damLocation = np.random.randint(1, n_canals, N_BLOCKS).tolist() # Generate random kvector. 0 is not a good position in c_to_r_list
else:
prohibited_node_list = [i for i,_ in enumerate(oWTcanlist[1:]) if oWTcanlist[1:][i] < wt_canals[1:][i]] # [1:] is to take the 0th element out of the loop
candidate_node_list = np.array([e for e in range(1, n_canals) if e not in prohibited_node_list]) # remove 0 from the range of possible canals
damLocation = np.random.choice(candidate_node_list, size=N_BLOCKS)
if hand_made_dams:
# HAND-MADE RULE OF DAM POSITIONS TO ADD:
hand_picked_dams = (11170, 10237, 10514, 2932, 4794, 8921, 4785, 5837, 7300, 6868) # rule-based approach
hand_picked_dams = [6959, 901, 945, 9337, 10089, 7627, 1637, 7863, 7148, 7138, 3450, 1466, 420, 4608, 4303, 6908, 9405, 8289, 7343, 2534, 9349, 6272, 8770, 2430, 2654, 6225, 11152, 118, 4013, 3381, 6804, 6614, 7840, 9839, 5627, 3819, 7971, 402, 6974, 7584, 3188, 8316, 1521, 856, 770, 6504, 707, 5478, 5512, 1732, 3635, 1902, 2912, 9220, 1496, 11003, 8371, 10393, 2293, 4901, 5892, 6110, 2118, 4485, 6379, 10300, 6451, 5619, 9871, 9502, 1737, 4368, 7290, 9071, 11222, 3085, 2013, 5226, 597, 5038]
damLocation = hand_picked_dams
wt_canals = utilities.place_dams(oWTcanlist, srfcanlist, BLOCK_HEIGHT, damLocation, CNM)
"""
#########################################
HYDROLOGY
#########################################
"""
ny, nx = dem.shape
dx = 1.; dy = 1. # metres per pixel (Actually, pixel size is 100m x 100m, so all units have to be converted afterwards)
boundary_arr = boundary_mask * (dem - DIRI_BC) # constant Dirichlet value in the boundaries
ele = dem * catchment_mask
# Get a pickled phi solution (not ele-phi!) computed before without blocks, independently,
# and use it as initial condition to improve convergence time of the new solution
# Get a pickled phi solution (not ele-phi!) computed before without blocks,
# and use it as initial condition to improve convergence of the new solution.
retrieve_transient_phi_sol_from_pickled = False
if retrieve_transient_phi_sol_from_pickled:
    # BUGFIX: pickle files are binary; opening in text mode 'r' makes
    # pickle.load fail on Python 3.
    with open(r"pickled/transient_phi_sol.pkl", 'rb') as f:
        phi_ini = pickle.load(f)
    print("transient phi solution loaded as initial condition")
else:
    phi_ini = ele + HINI  # initial h (gwl) in the compartment
phi_ini = phi_ini * catchment_mask
wt_canal_arr = np.zeros((ny,nx)) # (nx,ny) array with wt canal height in corresponding nodes
for canaln, coords in enumerate(c_to_r_list):
if canaln == 0:
continue # because c_to_r_list begins at 1
wt_canal_arr[coords] = wt_canals[canaln]
dry_peat_volume, wt_track_drained, wt_track_notdrained, avg_wt_over_time = hydro.hydrology('transient', nx, ny, dx, dy, DAYS, ele, phi_ini, catchment_mask, wt_canal_arr, boundary_arr,
peat_type_mask=peat_type_masked, httd=h_to_tra_and_C_dict, tra_to_cut=tra_to_cut, sto_to_cut=sto_to_cut,
diri_bc=DIRI_BC, neumann_bc = None, plotOpt=True, remove_ponding_water=True,
P=P, ET=ET, dt=TIMESTEP)
water_blocked_canals = sum(np.subtract(wt_canals[1:], oWTcanlist[1:]))
cum_Vdp_nodams = 21088.453521509597 # Value of dry peat volume without any blocks, without any precipitation for 3 days. Normalization.
print('dry_peat_volume(%) = ', dry_peat_volume/cum_Vdp_nodams * 100. , '\n',
'water_blocked_canals = ', water_blocked_canals)
"""
Final printings
"""
fname = r'output/results_mc_3_cumulative.txt'
if N_ITER > 20: # only if big enough number of simulated days
with open(fname, 'a') as output_file:
output_file.write(
"\n" + str(i) + " " + str(dry_peat_volume) + " "
+ str(N_BLOCKS) + " " + str(N_ITER) + " " + str(DAYS) + " "
+ str(time.ctime()) + " " + str(water_blocked_canals)
)
"""
Save WTD data if simulating a year
"""
fname = r'output/wtd_year_' + str(N_BLOCKS) + '.txt'
if DAYS > 300:
with open(fname, 'a') as output_file:
output_file.write("\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%\n" +
str(time.ctime()) + " nblocks = " + str(N_BLOCKS) + " ET = " + str(ET[0]) +
'\n' + 'drained notdrained mean'
)
for i in range(len(wt_track_drained)):
output_file.write( "\n" + str(wt_track_drained[i]) + " " + str(wt_track_notdrained[i]) + " " + str(avg_wt_over_time[i]))
plt.figure()
plt.plot(list(range(0,DAYS)), wt_track_drained, label='close to drained')
plt.plot(list(range(0,DAYS)), wt_track_notdrained, label='away from drained')
plt.plot(list(range(0,DAYS)), avg_wt_over_time, label='average')
plt.xlabel('time(days)'); plt.ylabel('WTD (m)')
plt.legend()
plt.show() |
19,386 | 06490fc65b9395eeda08cecbf6c9d7e4bd45a01f | #!/usr/bin/env python3
"""
"""
__author__ = "Peter Kleiweg"
__version__ = "0.01"
__date__ = "2010/11/18"
#| imports
import cgitb; cgitb.enable(format="html")
import codecs, cgi, os, re, sys
import u.html, u.myCgi
from u.config import appdir
#| globals
#| functions
def decode(data):
    """Decode raw uploaded bytes into a list of text lines.

    Sniffs the encoding (BOM-marked UTF-8/UTF-16, plain ASCII, UTF-8, with
    ISO-8859-1 as last-resort fall-back), normalises BOM'd variants to UTF-8,
    then splits on whatever line separator the data actually uses.
    """
    # A BOM pins the encoding unambiguously; otherwise start optimistic.
    if data.startswith(codecs.BOM_UTF8):
        enc = 'utf-8-sig'
    elif data.startswith(codecs.BOM_UTF16_BE) or data.startswith(codecs.BOM_UTF16_LE):
        enc = 'utf-16'
    else:
        enc = 'us-ascii'
    # Provisional splitter for the per-line scan below; refined again later.
    if data.find(b'\n') >= 0:
        splitter = b'\n'
    else:
        splitter = b'\r'
    # Escalate the guess line by line: us-ascii -> utf-8 -> iso-8859-1.
    for line in data.split(splitter):
        if enc == 'us-ascii':
            if re.match(br'[\x00-\x7F]*$', line):
                continue
            else:
                enc = 'utf-8'
        if enc == 'utf-8':
            # Accept only well-formed UTF-8 byte sequences (1- to 6-byte forms).
            if re.match(br'([\x00-\x7F]|[\xC0-\xDF][\x80-\xBF]|[\xE0-\xEF][\x80-\xBF]{2}|[\xF0-\xF7][\x80-\xBF]{3}|[\xF8-\xFB][\x80-\xBF]{4}|[\xFC-\xFD][\x80-\xBF]{5})*$', line):
                continue
            else:
                enc = 'iso-8859-1' # fall-back
                break
    # Re-encode BOM'd/UTF-16 data so downstream only ever sees plain UTF-8.
    if enc.startswith('utf') and enc != 'utf-8':
        try:
            data = data.decode(enc).encode('utf-8')
            enc = 'utf-8'
        except:
            # NOTE(review): bare except is broad; reports the error page and aborts.
            u.html.exitMessage('Error', 'Decoding of data failed.')
    # Choose the line separator that occurs earliest in the data.
    j = len(data)
    for sp in [b'\n\r', b'\n', b'\r\n', b'\r']:
        i = data.find(sp)
        if i >= 0 and i < j:
            splitter = sp
            j = i
    # Strip a trailing separator, split, and scrub stray CR/LF from each line.
    return [x.decode(enc).replace('\n', '').replace('\r', '') for x in data.rstrip(splitter).split(splitter)]
def translate(s):
    """Transliterate s token by token using the module-level table `tr`.

    Returns (False, result) on success, or (True, html) at the first unknown
    token, where html shows the translated prefix and highlights the
    untranslated remainder in red.
    """
    consumed = []
    output = []
    while s:
        token = RE.match(s).group()
        if token not in tr:
            good = u.html.escape(''.join(consumed))
            bad = u.html.escape(s)
            return True, '{}<span style="color:#FF0000">{}</span>'.format(good, bad)
        consumed.append(token)
        output.append(tr[token])
        s = s[len(token):]
    return False, ''.join(output)
def hex2chr(m):
    """Turn a regex match of four hex digits into the character it encodes."""
    return chr(int(m.group(1), 16))

def code2unicode(s):
    """Replace every U+XXXX escape in s with its literal Unicode character."""
    return re.sub(r'U\+([0-9A-Fa-f]{4})', hex2chr, s)
#| main
data = u.myCgi.data.get('data', '')
if not data:
u.html.exitMessage('Error', 'Missing or empty data file')
data = decode(data)
trans = u.myCgi.data.get('trans', '')
if trans:
trans = decode(trans)
else:
fp = open('{}tools/XSampaTable.txt'.format(appdir), 'rt', encoding='utf-8')
trans = fp.readlines()
fp.close()
tr = {}
for line in trans:
a = line.split()
if len(a) < 2:
continue
if a[0][0] == '#':
continue
tr[code2unicode(a[0])] = code2unicode(a[1])
k = list(tr.keys())
k.sort(reverse=True)
RE = re.compile('|'.join([re.escape(x) for x in k]) + '|.')
result = []
errors = []
state = 0
lineno = 0
for line in data:
lineno += 1
if not line or line[0] == '#':
result.append(line)
continue
items = line.split('\t')
if state == 0:
if not items[0]:
items = items[1:]
itemNames = items
nItems = len(items)
result.append(line)
state = 1
else:
try:
lbl = items[0]
except:
lbl = ''
cells = items[1:]
if len(cells) != nItems:
errors.append('Line {} "{}" has {} data cells (should be {})'.format(
lineno, u.html.escape(lbl), len(cells), nItems))
continue
for i in range(nItems):
values = []
for value in cells[i].split(' / '):
value = re.sub('^(/ +)*(.*?)( +/)*$', '\\2', value)
if not value:
continue
err, txt = translate(value)
if err:
errors.append('Parse error for "{}" - "{}", unknown token: {}'.format(
u.html.escape(lbl), u.html.escape(itemNames[i]), txt))
else:
values.append(txt)
cells[i] = ' / '.join(values)
result.append(lbl + '\t' + '\t'.join(cells))
if errors:
if len(errors) == 1:
e = 'Error'
else:
e = 'Errors'
u.html.exitMessage(e, '<ul>\n<li>' + '\n<li>'.join(errors) + '\n</ul>')
if u.myCgi.data.get('outenc', '') == b'utf16':
enc = 'utf-16'
else:
enc = 'utf-8'
fp = sys.stdout.detach()
fp.write('''Content-type: text/plain; charset={}
Content-Disposition: attachment; filename="nameless.txt"
'''.format(enc).encode('us-ascii'))
fp.write(('\n'.join(result) + '\n').encode(enc))
|
19,387 | 7e73f82a0c1883c843840e290780c671f6909c4d | from fractions import gcd
from functools import reduce
def lcm(x, y):
    """Return the least common multiple of two positive integers."""
    # BUGFIX: fractions.gcd (imported at the top of this file) was removed in
    # Python 3.9; source gcd from math instead. NOTE(review): the top-level
    # `from fractions import gcd` import will still fail on >=3.9 and should
    # be updated too.
    from math import gcd
    return (x * y) // gcd(x, y)

def lcm_list(nums):
    """Return the LCM of an iterable of positive integers (1 if empty)."""
    return reduce(lcm, nums, 1)
# N = number of values on the next line (read but not otherwise needed).
N = int(input())
A = list(map(int, input().split()))
# lcm(A) - 1 maximises every remainder: since lcm(A) % a == 0, x % a == a - 1.
x = lcm_list(A) - 1
print(sum(x % a for a in A))
|
19,388 | f1227f371c0e40ca3ce822cb4111bff48d552444 | # Generated by Django 2.0.6 on 2018-07-31 03:39
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """Auto-generated migration: alters the default of Post.created_date."""

    dependencies = [
        ('blog', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='post',
            name='created_date',
            # NOTE(review): the default is the fixed timestamp captured when
            # makemigrations ran — the model likely wanted a callable such as
            # django.utils.timezone.now. Harmless in a historical migration,
            # so left unchanged.
            field=models.DateField(default=datetime.datetime(2018, 7, 31, 3, 39, 33, 950990, tzinfo=utc)),
        ),
    ]
|
19,389 | 20588910fae3fde413ef20d3a8ced7ee6e80b1f0 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import unittest
from distutils.version import LooseVersion
import numpy as np
import pandas as pd
from pandas.api.types import CategoricalDtype
import pyspark.pandas as ps
from pyspark.testing.pandasutils import ComparisonTestBase, TestUtils
class CategoricalTestsMixin:
@property
def pdf(self):
    """Pandas fixture: two categorical columns; 'b' carries an explicit
    category order ("c", "b", "d", "a") including the unused category "d"."""
    return pd.DataFrame(
        {
            "a": pd.Categorical([1, 2, 3, 1, 2, 3]),
            "b": pd.Categorical(
                ["b", "a", "c", "c", "b", "a"], categories=["c", "b", "d", "a"]
            ),
        },
    )
@property
def df_pair(self):
    """Matching (pandas, pandas-on-Spark) DataFrame pair over the fixture data.

    NOTE(review): self.psdf is presumably supplied by the test base class
    (derived from self.pdf) — confirm in ComparisonTestBase.
    """
    return self.pdf, self.psdf
def test_categorical_frame(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf, pdf)
self.assert_eq(psdf.a, pdf.a)
self.assert_eq(psdf.b, pdf.b)
self.assert_eq(psdf.index, pdf.index)
self.assert_eq(psdf.sort_index(), pdf.sort_index())
self.assert_eq(psdf.sort_values("b"), pdf.sort_values("b"))
def test_categorical_series(self):
pser = pd.Series([1, 2, 3], dtype="category")
psser = ps.Series([1, 2, 3], dtype="category")
self.assert_eq(psser, pser)
self.assert_eq(psser.cat.categories, pser.cat.categories)
self.assert_eq(psser.cat.codes, pser.cat.codes)
self.assert_eq(psser.cat.ordered, pser.cat.ordered)
with self.assertRaisesRegex(ValueError, "Cannot call CategoricalAccessor on type int64"):
ps.Series([1, 2, 3]).cat
def test_categories_setter(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
pser = pser.cat.rename_categories(["z", "y", "x"])
psser = psser.cat.rename_categories(["z", "y", "x"])
self.assert_eq(pser, psser)
self.assert_eq(pdf, psdf)
with self.assertRaises(ValueError):
psser.cat.categories = [1, 2, 3, 4]
def test_add_categories(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
self.assert_eq(pser.cat.add_categories(4), psser.cat.add_categories(4))
self.assert_eq(pser.cat.add_categories([4, 5]), psser.cat.add_categories([4, 5]))
self.assert_eq(pser.cat.add_categories([]), psser.cat.add_categories([]))
pser = pser.cat.add_categories(4)
psser = psser.cat.add_categories(4)
self.assertRaises(ValueError, lambda: psser.cat.add_categories(4))
self.assertRaises(ValueError, lambda: psser.cat.add_categories([5, 5]))
def test_remove_categories(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
self.assert_eq(pser.cat.remove_categories(2), psser.cat.remove_categories(2))
self.assert_eq(pser.cat.remove_categories([1, 3]), psser.cat.remove_categories([1, 3]))
self.assert_eq(pser.cat.remove_categories([]), psser.cat.remove_categories([]))
self.assert_eq(pser.cat.remove_categories([2, 2]), psser.cat.remove_categories([2, 2]))
self.assert_eq(
pser.cat.remove_categories([1, 2, 3]), psser.cat.remove_categories([1, 2, 3])
)
self.assert_eq(pser.cat.remove_categories(None), psser.cat.remove_categories(None))
self.assert_eq(pser.cat.remove_categories([None]), psser.cat.remove_categories([None]))
self.assertRaises(ValueError, lambda: psser.cat.remove_categories(4))
self.assertRaises(ValueError, lambda: psser.cat.remove_categories([4, None]))
def test_remove_unused_categories(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
self.assert_eq(pser.cat.remove_unused_categories(), psser.cat.remove_unused_categories())
pser = pser.cat.add_categories(4)
pser = pser.cat.remove_categories(2)
psser = psser.cat.add_categories(4)
psser = psser.cat.remove_categories(2)
self.assert_eq(pser.cat.remove_unused_categories(), psser.cat.remove_unused_categories())
def test_reorder_categories(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
self.assert_eq(
pser.cat.reorder_categories([1, 2, 3]), psser.cat.reorder_categories([1, 2, 3])
)
self.assert_eq(
pser.cat.reorder_categories([1, 2, 3], ordered=True),
psser.cat.reorder_categories([1, 2, 3], ordered=True),
)
self.assert_eq(
pser.cat.reorder_categories([3, 2, 1]), psser.cat.reorder_categories([3, 2, 1])
)
self.assert_eq(
pser.cat.reorder_categories([3, 2, 1], ordered=True),
psser.cat.reorder_categories([3, 2, 1], ordered=True),
)
self.assertRaises(ValueError, lambda: psser.cat.reorder_categories([1, 2]))
self.assertRaises(ValueError, lambda: psser.cat.reorder_categories([1, 2, 4]))
self.assertRaises(ValueError, lambda: psser.cat.reorder_categories([1, 2, 2]))
self.assertRaises(TypeError, lambda: psser.cat.reorder_categories(1))
self.assertRaises(TypeError, lambda: psdf.b.cat.reorder_categories("abcd"))
def test_as_ordered_unordered(self):
pdf, psdf = self.df_pair
pser = pdf.a
psser = psdf.a
# as_ordered
self.assert_eq(pser.cat.as_ordered(), psser.cat.as_ordered())
# as_unordered
self.assert_eq(pser.cat.as_unordered(), psser.cat.as_unordered())
def test_astype(self):
pser = pd.Series(["a", "b", "c"])
psser = ps.from_pandas(pser)
self.assert_eq(psser.astype("category"), pser.astype("category"))
self.assert_eq(
psser.astype(CategoricalDtype(["c", "a", "b"])),
pser.astype(CategoricalDtype(["c", "a", "b"])),
)
pcser = pser.astype(CategoricalDtype(["c", "a", "b"]))
pscser = psser.astype(CategoricalDtype(["c", "a", "b"]))
self.assert_eq(pscser.astype("category"), pcser.astype("category"))
# CategoricalDtype is not updated if the dtype is same from pandas 1.3.
if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
self.assert_eq(
pscser.astype(CategoricalDtype(["b", "c", "a"])),
pcser.astype(CategoricalDtype(["b", "c", "a"])),
)
else:
self.assert_eq(
pscser.astype(CategoricalDtype(["b", "c", "a"])),
pcser,
)
self.assert_eq(pscser.astype(str), pcser.astype(str))
def test_factorize(self):
pser = pd.Series(["a", "b", "c", None], dtype=CategoricalDtype(["c", "a", "d", "b"]))
psser = ps.from_pandas(pser)
pcodes, puniques = pser.factorize()
kcodes, kuniques = psser.factorize()
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
pcodes, puniques = pser.factorize(use_na_sentinel=-2)
kcodes, kuniques = psser.factorize(use_na_sentinel=-2)
self.assert_eq(kcodes.tolist(), pcodes.tolist())
self.assert_eq(kuniques, puniques)
def test_frame_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.apply(lambda x: x).sort_index(), pdf.apply(lambda x: x).sort_index())
self.assert_eq(
psdf.apply(lambda x: x, axis=1).sort_index(),
pdf.apply(lambda x: x, axis=1).sort_index(),
)
def test_frame_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply()
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c"])
def categorize(ser) -> ps.Series[dtype]:
return ser.astype(dtype)
self.assert_eq(
psdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
pdf.apply(categorize).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(psdf.transform(lambda x: x), pdf.transform(lambda x: x))
self.assert_eq(psdf.transform(lambda x: x.cat.codes), pdf.transform(lambda x: x.cat.codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.transform(lambda x: x.astype(dtype)).sort_index(),
pdf.transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_frame_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_transform()
pdf, psdf = self.df_pair
def codes(pser) -> ps.Series[np.int8]:
return pser.cat.codes
self.assert_eq(psdf.transform(codes), pdf.transform(codes))
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
def to_category(pser) -> ps.Series[dtype]:
return pser.astype(dtype)
self.assert_eq(
psdf.transform(to_category).sort_index(), pdf.transform(to_category).sort_index()
)
def test_series_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.a.apply(lambda x: x).sort_index(), pdf.a.apply(lambda x: x).sort_index()
)
def test_series_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_series_apply()
pdf, psdf = self.df_pair
ret = psdf.a.dtype
def identity(pser) -> ret:
return pser
self.assert_eq(psdf.a.apply(identity).sort_index(), pdf.a.apply(identity).sort_index())
# TODO: The return type is still category.
# def to_str(x) -> str:
# return str(x)
#
# self.assert_eq(
# psdf.a.apply(to_str).sort_index(), pdf.a.apply(to_str).sort_index()
# )
def test_groupby_apply(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").apply(lambda df: df).sort_index(),
pdf.groupby("a").apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
pdf.groupby("b").apply(lambda df: df[["a"]]).sort_index(),
)
self.assert_eq(
psdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
pdf.groupby(["a", "b"]).apply(lambda df: df).sort_index(),
)
self.assert_eq(
psdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
pdf.groupby("a").apply(lambda df: df.b.cat.codes).sort_index(),
)
self.assert_eq(
psdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
pdf.groupby("a")["b"].apply(lambda b: b.cat.codes).sort_index(),
)
# TODO: grouping by a categorical type sometimes preserves unused categories.
# self.assert_eq(
# psdf.groupby("a").apply(len).sort_index(), pdf.groupby("a").apply(len).sort_index(),
# )
def test_groupby_apply_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_apply()
pdf, psdf = self.df_pair
def identity(df) -> ps.DataFrame[zip(psdf.columns, psdf.dtypes)]:
return df
self.assert_eq(
psdf.groupby("a").apply(identity).sort_values(["b"]).reset_index(drop=True),
pdf.groupby("a").apply(identity).sort_values(["b"]).reset_index(drop=True),
)
def test_groupby_transform(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.groupby("a").transform(lambda x: x).sort_index(),
pdf.groupby("a").transform(lambda x: x).sort_index(),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
pdf.groupby("a").transform(lambda x: x.astype(dtype)).sort_index(),
)
def test_groupby_transform_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_groupby_transform()
pdf, psdf = self.df_pair
def identity(x) -> ps.Series[psdf.b.dtype]:
return x
self.assert_eq(
psdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(identity).sort_values("b").reset_index(drop=True),
)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
# The behavior for CategoricalDtype is changed from pandas 1.3
if LooseVersion(pd.__version__) >= LooseVersion("1.3"):
ret_dtype = pdf.b.dtype
else:
ret_dtype = dtype
def astype(x) -> ps.Series[ret_dtype]:
return x.astype(dtype)
if LooseVersion(pd.__version__) >= LooseVersion("1.2"):
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
pdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
)
else:
expected = pdf.groupby("a").transform(astype)
expected["b"] = dtype.categories.take(expected["b"].cat.codes).astype(dtype)
self.assert_eq(
psdf.groupby("a").transform(astype).sort_values("b").reset_index(drop=True),
expected.sort_values("b").reset_index(drop=True),
)
def test_frame_apply_batch(self):
pdf, psdf = self.df_pair
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(str)).sort_index(),
pdf.astype(str).sort_index(),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
self.assert_eq(
psdf.pandas_on_spark.apply_batch(lambda pdf: pdf.astype(dtype)).sort_index(),
pdf.astype(dtype).sort_index(),
)
def test_frame_apply_batch_without_shortcut(self):
with ps.option_context("compute.shortcut_limit", 0):
self.test_frame_apply_batch()
pdf, psdf = self.df_pair
def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]': # noqa: F405
return pdf.astype(str)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_str).sort_values(["a", "b"]).reset_index(drop=True),
to_str(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
pdf = pd.DataFrame(
{"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
)
psdf = ps.from_pandas(pdf)
dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
ret = ps.DataFrame["a":dtype, "b":dtype]
def to_category(pdf) -> ret:
return pdf.astype(dtype)
self.assert_eq(
psdf.pandas_on_spark.apply_batch(to_category)
.sort_values(["a", "b"])
.reset_index(drop=True),
to_category(pdf).sort_values(["a", "b"]).reset_index(drop=True),
)
def test_frame_transform_batch(self):
    """transform_batch should agree with pandas for astype and cat-codes transforms."""
    pdf, psdf = self.df_pair
    # Frame-level astype(str), then a Series-returning transform (cat codes).
    for transform in (lambda batch: batch.astype(str), lambda batch: batch.b.cat.codes):
        self.assert_eq(
            psdf.pandas_on_spark.transform_batch(transform).sort_index(),
            transform(pdf).sort_index(),
        )
    pdf = pd.DataFrame(
        {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
    )
    psdf = ps.from_pandas(pdf)
    dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
    # Categorical dtype applied to the whole frame and to a single column.
    for transform in (lambda batch: batch.astype(dtype), lambda batch: batch.b.astype(dtype)):
        self.assert_eq(
            psdf.pandas_on_spark.transform_batch(transform).sort_index(),
            transform(pdf).sort_index(),
        )
def test_frame_transform_batch_without_shortcut(self):
    """Typed-function variants of test_frame_transform_batch with the shortcut disabled."""
    with ps.option_context("compute.shortcut_limit", 0):
        self.test_frame_transform_batch()
    pdf, psdf = self.df_pair

    def to_str(pdf) -> 'ps.DataFrame["a":str, "b":str]':  # noqa: F405
        return pdf.astype(str)

    self.assert_eq(
        psdf.pandas_on_spark.transform_batch(to_str).sort_index(),
        to_str(pdf).sort_index(),
    )

    def to_codes(pdf) -> ps.Series[np.int8]:
        return pdf.b.cat.codes

    self.assert_eq(
        psdf.pandas_on_spark.transform_batch(to_codes).sort_index(),
        to_codes(pdf).sort_index(),
    )
    pdf = pd.DataFrame(
        {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
    )
    psdf = ps.from_pandas(pdf)
    dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
    ret = ps.DataFrame["a":dtype, "b":dtype]

    def to_frame_category(pdf) -> ret:
        return pdf.astype(dtype)

    self.assert_eq(
        psdf.pandas_on_spark.transform_batch(to_frame_category).sort_index(),
        to_frame_category(pdf).sort_index(),
    )

    def to_series_category(pdf) -> ps.Series[dtype]:
        return pdf.b.astype(dtype)

    # pandas keeps the column name "b", while the typed pandas-on-Spark result
    # is unnamed — hence the rename() on the expected side.
    self.assert_eq(
        psdf.pandas_on_spark.transform_batch(to_series_category).sort_index(),
        to_series_category(pdf).rename().sort_index(),
    )
def test_series_transform_batch(self):
    """Series.pandas_on_spark.transform_batch should mirror pandas astype."""
    pdf, psdf = self.df_pair
    actual = psdf.a.pandas_on_spark.transform_batch(lambda batch: batch.astype(str))
    expected = pdf.a.astype(str)
    self.assert_eq(actual.sort_index(), expected.sort_index())
    # Same check with a categorical dtype that has an unused category.
    pdf = pd.DataFrame(
        {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
    )
    psdf = ps.from_pandas(pdf)
    dtype = CategoricalDtype(categories=["a", "b", "c", "d"])
    actual = psdf.a.pandas_on_spark.transform_batch(lambda batch: batch.astype(dtype))
    expected = pdf.a.astype(dtype)
    self.assert_eq(actual.sort_index(), expected.sort_index())
def test_series_transform_batch_without_shortcut(self):
    """Typed-function variant of test_series_transform_batch, shortcut off."""
    with ps.option_context("compute.shortcut_limit", 0):
        self.test_series_transform_batch()
    pdf, psdf = self.df_pair

    def to_str(pser) -> ps.Series[str]:
        return pser.astype(str)

    actual = psdf.a.pandas_on_spark.transform_batch(to_str)
    self.assert_eq(actual.sort_index(), to_str(pdf.a).sort_index())
    pdf = pd.DataFrame(
        {"a": ["a", "b", "c", "a", "b", "c"], "b": ["b", "a", "c", "c", "b", "a"]}
    )
    psdf = ps.from_pandas(pdf)
    dtype = CategoricalDtype(categories=["a", "b", "c", "d"])

    def to_category(pser) -> ps.Series[dtype]:
        return pser.astype(dtype)

    actual = psdf.a.pandas_on_spark.transform_batch(to_category)
    self.assert_eq(actual.sort_index(), to_category(pdf.a).sort_index())
def test_unstack(self):
    """Series.unstack over a two-level MultiIndex should match pandas."""
    pdf = self.pdf
    pdf.index = pd.MultiIndex.from_tuples(
        [("x", "a"), ("x", "b"), ("x", "c"), ("y", "a"), ("y", "b"), ("y", "d")]
    )
    psdf = ps.from_pandas(pdf)
    for column in ("a", "b"):
        self.assert_eq(
            psdf[column].unstack().sort_index(), pdf[column].unstack().sort_index()
        )
def test_rename_categories(self):
    """cat.rename_categories should match pandas for list, dict, and callable renamers."""
    pdf, psdf = self.df_pair
    pser = pdf.b
    psser = psdf.b
    # Valid renamers: full positional list, partial mapping, and a callable.
    for renamer in ([0, 1, 3, 2], {"a": "A", "c": "C"}, lambda x: x.upper()):
        self.assert_eq(
            pser.cat.rename_categories(renamer), psser.cat.rename_categories(renamer)
        )
    # A list of the wrong length is a ValueError.
    self.assertRaisesRegex(
        ValueError,
        "new categories need to have the same number of items as the old categories",
        lambda: psser.cat.rename_categories([0, 1, 2]),
    )
    # Non-list-like, non-dict, non-callable inputs are rejected with TypeError.
    for bad_input in (None, 1, "x"):
        self.assertRaises(
            TypeError,
            lambda: psser.cat.rename_categories(bad_input),
        )
def test_set_categories(self):
    """cat.set_categories should match pandas when reordering, shrinking, and growing."""
    pdf, psdf = self.df_pair
    pser = pdf.b
    psser = psdf.b
    # Reorder, drop one category, and add unseen categories.
    relabelings = (["a", "c", "b", "o"], ["a", "c", "b"], ["a", "c", "b", "d", "e"])
    renamings = ([0, 1, 3, 2], [0, 1, 3], [0, 1, 3, 2, 4])
    # Plain set_categories.
    for cats in relabelings:
        self.assert_eq(
            pser.cat.set_categories(cats),
            psser.cat.set_categories(cats),
        )
    # rename=True keeps the codes and just relabels them.
    for cats in renamings:
        self.assert_eq(
            pser.cat.set_categories(cats, rename=True),
            psser.cat.set_categories(cats, rename=True),
        )
    # ordered=True additionally imposes an ordering on the categories.
    for cats in relabelings:
        self.assert_eq(
            pser.cat.set_categories(cats, ordered=True),
            psser.cat.set_categories(cats, ordered=True),
        )
    self.assertRaisesRegex(
        TypeError,
        "Parameter 'new_categories' must be list-like, was",
        lambda: psser.cat.set_categories(None),
    )
class CategoricalTests(CategoricalTestsMixin, ComparisonTestBase, TestUtils):
    """Concrete test case wiring the categorical test mixin to the comparison base."""
    pass
if __name__ == "__main__":
import unittest
from pyspark.pandas.tests.test_categorical import * # noqa: F401
try:
import xmlrunner
testRunner = xmlrunner.XMLTestRunner(output="target/test-reports", verbosity=2)
except ImportError:
testRunner = None
unittest.main(testRunner=testRunner, verbosity=2)
|
19,390 | f5cd5c28bc0c7c7892ddd1c25bffa9e65d5fa8c4 | randomList = ['Test', 'Random', 'Orange', 'Blue', 'Rabbit', 'Best']
# Demo: case-insensitive substring search over randomList, implemented twice.
search = input('Search: ')
# Without lambda: explicit loop accumulating matching items.
woLambdaList = []
for item in randomList:
    if search.lower() in item.lower():
        woLambdaList.append(item)
print('Without lambda: ', woLambdaList)
# With lambda: the same filter expressed with filter() + lambda.
lambdaList = list(filter(lambda i: search.lower() in i.lower(),randomList))
print('With lambda: ', lambdaList)
19,391 | 23f0ee3e9d6f7964bd6e645869629ee062fe6c16 | # Generated by Django 3.0.1 on 2019-12-30 18:13
from datetime import datetime
from django.db import migrations
def create_packages_statistic(apps, schema_editor):
    """Forward data migration: seed 1000 demo Package rows (ids 1000-1999).

    Uses the historical model via ``apps.get_model`` as required in data
    migrations.  Every 19th id becomes 'DV', every 9th 'RJ', every even id
    'TR' (first matching rule wins); remaining rows keep the model default.
    """
    Package = apps.get_model('post', 'Package')
    for i in range(1000, 2000):
        p = Package(
            id=i,
            sender_name=f"Khokhlova {i}",
            receiver_name=f"Magic {i}",
            tracking_number=f"{i}",
            address_from="R'lyeh",
            address_to="Providence, Rhode Island, U.S.",
            price=42,
            value=42,
            weight=42,
            accepted_at=datetime.now(),
            delivered_at=None,
        )
        # Decide the status *before* saving: the original code saved each row
        # twice (insert, then a status update), doubling the query count for
        # an identical final state.
        if not i % 19:
            p.status = 'DV'
        elif not i % 9:
            p.status = 'RJ'
        elif not i % 2:
            p.status = 'TR'
        p.save()
def delete_packages_statistic(apps, schema_editor):
    """Reverse data migration: remove the seeded demo rows (ids 1000-1999).

    Uses one bulk DELETE via the inclusive ``id__range`` lookup instead of the
    original 1000 per-id queries.
    """
    Package = apps.get_model('post', 'Package')
    Package.objects.filter(id__range=(1000, 1999)).delete()
class Migration(migrations.Migration):
    """Reversible data migration seeding demo Package statistics."""

    dependencies = [
        ('post', '0003_auto_20191225_2019'),
    ]
    operations = [
        # Forward pass seeds 1000 demo packages; reverse pass deletes them.
        migrations.RunPython(create_packages_statistic, delete_packages_statistic)
    ]
|
19,392 | ee61eb6603428c32ca9d04b290cd8dce532c3de3 | # -*- coding:utf-8 -*-
import random
def generate_enum_dict(type_list):
    """Map each name in *type_list* to its positional index.

    :param type_list: sequence of (hashable) names.
    :return: dict {name: index}; duplicate names keep the last index,
        matching the original loop's behavior.
    """
    # enumerate() works on both Python 2 and 3, unlike the original
    # xrange()-based index loop (Python 2 only).
    return {name: index for index, name in enumerate(type_list)}
mock_data = {}
type_list = [
'welcome', #0
'login', #1
'register', #2
'cat_guide', #3
'news_homepage', #4
'cat_homepage', #5
'my_homepage', #6
'search', #7
'my_fav', #8
'handle_fav', #9
'my_rss', #10
'handle_rss', #11
'source_guide', #12
'source_homepage', #13
'rss_homepage', #14
'handle_share', #15
'handle_like', #16
'handle_fav_set', #17
'fav_homepage', #18
]
type_dict = generate_enum_dict(type_list)
'''=====================欢迎模块====================='''
welcome_request = {
"type":type_dict['welcome'],
"qid":"mock"
}
welcome_response = {
"qid":"mock",
"img_url":"https://ss0.bdstatic.com/5aV1bjqh_Q23odCf/static/superman/img/logo/bd_logo1_31bdc765.png",
}
mock_data[type_dict['welcome']] = welcome_response
'''=====================登录模块====================='''
user_cat_list = [
'wechat',
'weibo',
'qq',
'phone',
]
login_cat_dict = generate_enum_dict(user_cat_list)
login_request = {
"type":type_dict['login'],
"qid":"mock",
"username":"testname",
"passwd":"123456",
"category":login_cat_dict['phone'],
}
res_code_list = [
'success',
'wrong_username_or_password',
'user_not_exist',
'user_is_exist',
'fav_handle_success',
'fav_handle_fail',
'rss_handle_success',
'rss_handle_fail',
]
res_code_dict = generate_enum_dict(res_code_list)
login_response = {
"qid":"mock",
"rescode":res_code_dict['success'],
"username":"mockname",
"uid":111222,
"others":"",
}
mock_data[type_dict['login']] = login_response
'''=====================注册模块====================='''
register_request = {
"type":type_dict['register'],
"qid":"mock",
"username":"mockname",
"passwd":"123456",
"data":"location=Hangzhou&sex=Male",
"category":login_cat_dict['phone'],
}
register_response = {
"qid":"mock",
"rescode":res_code_dict['user_is_exist'],
"username":"mockname",
"uid":111222,
"others":"",
}
mock_data[type_dict['register']] = register_response
'''=====================引导分类模块====================='''
cat_guide_request = {
"type":type_dict['cat_guide'],
"qid":"mock",
"uid":111222,
}
cat_example = {
"cat_id":123,
"title":"初中",
"sub_cat":[
{
"cat_id":"123001",
"title":"中考",
"last_title":"最新中考资讯大全",
"last_modify":"2016-03-26 23:00",
"bg_image":"http://test.image.com",
},
],
}
cat_guide_response = {
"qid":"mock",
"cat_list":[cat_example,cat_example,cat_example],
}
mock_data[type_dict['cat_guide']] = cat_guide_response
'''=====================分类主页模块====================='''
cat_homepage_request = {
"type":type_dict['cat_homepage'],
"qid":"mock",
"cat_id":"123",
"page":1,
"page_max_cnt":10,
"uid":111222,
}
doc_example = {
"doc_id":12321,
"title":"中考",
"author":"mockauthor",
"datetime":"2016-03-20 20:00:00",
"source":"Sina",
"bg_image":"http://background.image.com/",
"text":"This is sample text",
"source_icon":"http://icon.image.com/",
"source_desc":"百度",
"share_cnt":22,
"collection_cnt":88,
"like_cnt":100,
}
# Ten references to the same sample document dict.  List multiplication is
# equivalent to the original append loop (same shared object repeated) and,
# unlike the original xrange(), works on both Python 2 and 3.
example_doc_list = [doc_example] * 10
cat_homepage_response = {
"qid":"mock",
"end":random.choice([0,1]),
"doc_list":example_doc_list,
}
mock_data[type_dict['cat_homepage']] = cat_homepage_response
'''=====================新闻内容模块====================='''
res_code_dict = generate_enum_dict(res_code_list)
news_homepage_request = {
"type":type_dict['news_homepage'],
"qid":"mock",
"doc_id":123,
#type=cat&cat_id=321 or type=fav&fav_id=111 or type=source&source_name=sina
"list_type":"rss",
"list_desc":"10001",
"uid":111222,
}
news_homepage_response = {
"qid":"mock",
"text":'aaaaa<br>bbbbb<br><img src="http://text.image.com/" width=200 height=500>ccccc<br>',
"base":doc_example,
"pre_id":122,
"next_id":124,
"recommend_list":[doc_example,doc_example,doc_example,doc_example]
}
mock_data[type_dict['news_homepage']] = news_homepage_response
'''=====================我的主页模块====================='''
my_homepage_request = {
"type":type_dict['my_homepage'],
"qid":"mock",
"uid":111222,
}
my_homepage_response = {
"qid":"mock",
"portrait_url":"http://portrait.image.com",
"others":"",
}
mock_data[type_dict['my_homepage']] = my_homepage_response
'''=====================搜索发现模块====================='''
search_request = {
"type":type_dict['search'],
"qid":"mock",
"query":"中考",
"page":1,
"page_max_cnt":10,
"uid":111222,
}
search_response = {
"qid":"mock",
"end":random.choice([0,1]),
"doc_list":example_doc_list,
}
mock_data[type_dict['search']] = search_response
'''=====================收藏模块====================='''
fav_example = {
"fav_id":333,
"uid":111222,
"fav_name":"中考教育",
"doc_list":example_doc_list,
"fav_icon":"http://icon.fav.com",
}
my_fav_request = {
"type":type_dict['my_fav'],
"qid":"mock",
"uid":111222,
}
my_fav_response = {
"qid":"mock",
"fav_list":[fav_example,fav_example,fav_example],
}
mock_data[type_dict['my_fav']] = my_fav_response
opt_list = [
'add',
'remove',
]
opt_dict = generate_enum_dict(opt_list)
handle_fav_request = {
"type":type_dict['handle_fav'],
"qid":"mock",
"opt":opt_dict['add'],
"uid":111222,
"fav_id":333,
"doc_id":123,
}
handle_fav_response = {
"qid":"mock",
"rescode":res_code_dict['fav_handle_success'],
}
mock_data[type_dict['handle_fav']] = handle_fav_response
handle_fav_set_request = {
'type':type_dict['handle_fav_set'],
'qid':'mock',
'opt':opt_dict['add'],
'uid':111222,
'fav_id':'abc',
'fav_name':'高中教育',
}
handle_fav_set_response = {
'qid':'mock',
'rescode':res_code_dict['fav_handle_success'],
}
mock_data[type_dict['handle_fav_set']] = handle_fav_set_response
fav_homepage_request = {
'type':type_dict['fav_homepage'],
'qid':'mock',
'uid':10001,
'fav_id':'abc',
'page':1,
'page_max_cnt':10,
}
fav_homepage_response = {
'qid':'mock',
'end':random.choice([0,1]),
'doc_list':example_doc_list,
}
mock_data[type_dict['fav_homepage']] = fav_homepage_response
'''=====================订阅模块====================='''
my_rss_request = {
"type":type_dict['my_rss'],
"qid":"mock",
"uid":111222,
}
rss_cat_example = {
"cat_id":"123001",
"title":"中考",
"last_title":"最新中考资讯大全",
"last_modify":"2016-03-26 23:00",
"bg_image":"http://test.image.com",
}
my_rss_response = {
"qid":"mock",
"cat_list":[rss_cat_example, rss_cat_example, rss_cat_example],
}
mock_data[type_dict['my_rss']] = my_rss_response
handle_rss_request = {
"type":type_dict['handle_rss'],
"qid":"mock",
"opt":opt_dict['add'],
"uid":111222,
"cat_id":'111,222,333',
}
handle_rss_response = {
"qid":"mock",
"rescode":res_code_dict['rss_handle_success'],
}
mock_data[type_dict['handle_rss']] = handle_rss_response
rss_homepage_request = {
"type":type_dict['rss_homepage'],
"qid":"mock",
"uid":111222,
"page":1,
"page_max_cnt":10,
}
rss_homepage_response = {
"qid":"mock",
"end":random.choice([0,1]),
"doc_list":example_doc_list,
}
mock_data[type_dict['rss_homepage']] = rss_homepage_response
'''=====================新闻源模块====================='''
source_guide_request = {
"type":type_dict['source_guide'],
"qid":"mock",
"uid":111222,
}
source_example = {
'source':'sina',
'source_desc':'新浪新闻',
'source_icon':'http://icon.image.com/',
"last_title":"最新中考资讯大全",
"last_modify":"2016-03-26 23:00",
"bg_image":"http://test.image.com",
}
source_guide_response = {
"qid":"mock",
"source_list":[source_example, source_example, source_example],
}
mock_data[type_dict['source_guide']] = source_guide_response
source_homepage_request = {
"type":type_dict['source_homepage'],
"qid":"mock",
"source_name":"Sina",
"page":1,
"page_max_cnt":10,
"uid":111222,
}
source_homepage_response = {
"qid":"mock",
"end":random.choice([0,1]),
"doc_list":example_doc_list,
}
mock_data[type_dict['source_homepage']] = source_homepage_response
'''=====================分享统计模块====================='''
handle_share_request = {
"type":type_dict['handle_share'],
"qid":"mock",
"doc_id":100001,
"uid":111222,
}
handle_share_response = {
"qid":"mock",
"rescode":0,
}
mock_data[type_dict['handle_share']] = handle_share_response
'''=====================点赞统计模块====================='''
handle_like_request = {
"type":type_dict['handle_like'],
"qid":"mock",
"opt":opt_dict['add'],
"doc_id":100001,
"uid":10001,
}
handle_like_response = {
"qid":"mock",
"rescode":0,
}
mock_data[type_dict['handle_like']] = handle_like_response
|
19,393 | d462fd12111b67e511619d4dc12cf2f69af7f47a | def progress():
pass
class bcolors:
    """ANSI escape sequences plus helpers that wrap text in a color/style.

    Each helper returns the given text surrounded by the corresponding ANSI
    start sequence and the reset sequence (ENDC).
    """
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKCYAN = '\033[96m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'

    @staticmethod
    def info(text):
        return '{}{}{}'.format(bcolors.OKBLUE, text, bcolors.ENDC)

    @staticmethod
    def status(text):
        return '{}{}{}'.format(bcolors.OKCYAN, text, bcolors.ENDC)

    @staticmethod
    def fail(text):
        return '{}{}{}'.format(bcolors.FAIL, text, bcolors.ENDC)

    @staticmethod
    def ok(text):
        return '{}{}{}'.format(bcolors.OKGREEN, text, bcolors.ENDC)

    @staticmethod
    def warning(text):
        return '{}{}{}'.format(bcolors.WARNING, text, bcolors.ENDC)

    @staticmethod
    def header(text):
        return '{}{}{}'.format(bcolors.HEADER, text, bcolors.ENDC)

    @staticmethod
    def bold(text):
        # BUG FIX: the original referenced bcolors.Bold, which does not exist
        # (AttributeError at call time); the class constant is BOLD.
        return '{}{}{}'.format(bcolors.BOLD, text, bcolors.ENDC)

    @staticmethod
    def underline(text):
        return '{}{}{}'.format(bcolors.UNDERLINE, text, bcolors.ENDC)
|
19,394 | 6ff4512cab852e41302d84373f0b86c9f55fea6e | from functools import wraps
from logging import getLogger
# Read the docs at https://github.com/eclipse/paho.mqtt.python
# because eclipse.org has outdated information, which does not include MQTTv5
from paho.mqtt.client import Client as MqttClient, MQTTMessage, MQTTv5, MQTT_CLEAN_START_FIRST_ONLY
from paho.mqtt.properties import Properties
from paho.mqtt.reasoncodes import ReasonCodes
from paho.mqtt.subscribeoptions import SubscribeOptions
log = getLogger(__name__)
class GenericMqttEndpoint:
    """Base class for MQTTv5 endpoints built on paho-mqtt.

    Subclasses declare message handlers with ``@GenericMqttEndpoint.subscribe_decorator``;
    on connection with a fresh broker session the instance subscribes to every
    declared topic filter and dispatches incoming messages to the handlers,
    unpacking '+'/'#' wildcard components into positional arguments.
    """

    def __init__(self, client_kwargs: dict, password_auth: dict, server_kwargs: dict, tls: bool):
        """
        :param client_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L517
        :param password_auth: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L1318
        :param server_kwargs: See https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L913
        :param tls: If true, enables TLS with https://github.com/eclipse/paho.mqtt.python/blob/9c22a9c297c0cdc4e1aac13aa19073e09a822961/src/paho/mqtt/client.py#L765
        """
        self.mqtt_client_kwargs = client_kwargs
        # Some features and parameters depend on this.
        self.mqtt_client_kwargs.update(protocol=MQTTv5)
        self.mqtt_tls = tls
        self.mqtt_password_auth = password_auth
        self.mqtt_server_kwargs = server_kwargs
        # This is specific to MQTTv5 (MQTTv311 has clean_session in the client_kwargs instead)
        self.mqtt_server_kwargs.update(clean_start=MQTT_CLEAN_START_FIRST_ONLY)
        self._mqttc = MqttClient(**self.mqtt_client_kwargs)
        if self.mqtt_tls:
            self._mqttc.tls_set()
        if self.mqtt_password_auth:
            self._mqttc.username_pw_set(**self.mqtt_password_auth)
        self._mqttc.on_connect = self._on_connect
        self._mqttc.on_disconnect = self._on_disconnect
        self._mqttc.on_message = self._on_message
        self._mqttc.on_log = self._on_log
        # NOTE(review): attribute name carries a typo ("subsciptions"); kept as-is
        # because subclasses might already reference it.
        self._managed_subsciptions = dict()
        """
        This dictionary maps subscription topics to subscription options
        """
        # Collect all methods decorated with subscribe_decorator and register a
        # per-topic-filter callback for each of them.
        for attribute in self.__class__.__dict__.values():
            if hasattr(attribute, _SUBSCRIBE_DECORATOR_NAME):
                decorated_function = attribute
                topic_pattern, kwargs = getattr(decorated_function, _SUBSCRIBE_DECORATOR_NAME)
                if topic_pattern in self._managed_subsciptions:
                    raise Exception(
                        "A client cannot subscribe to an identical topic filter multiple times!")
                else:
                    self._managed_subsciptions[topic_pattern] = kwargs

                # This function introduces a scope, to avoid the changing loop
                # variables causing changing behaviour of call_decorated_function.
                # BUG FIX: topic_pattern is now passed in as well — the original
                # only bound decorated_function, so every handler closed over the
                # *last* loop value of topic_pattern and, with more than one
                # subscription, unpacked incoming topics against the wrong pattern.
                def create_caller(decorated_function, topic_pattern):
                    # the decorated_function has not yet a self object; thus we need this wrapper
                    @wraps(decorated_function)
                    def call_decorated_function(client, userdata, message):
                        variables = unpack_topic(topic_pattern, message.topic)
                        return decorated_function(self, client=client, userdata=userdata, message=message, *variables)
                    return call_decorated_function

                # this is done only once, not on every reconnect / resubscribe.
                self._mqttc.message_callback_add(
                    topic_pattern, create_caller(decorated_function, topic_pattern))

    def connect(self):
        """Connect asynchronously and start the background network loop."""
        # currently, this will retry first connects, we don't need bettermqtt
        self._mqttc.connect_async(**self.mqtt_server_kwargs)
        self._mqttc.loop_start()

    def _on_connect(self, client, userdata, flags, rc: ReasonCodes, properties: Properties = None):
        """Paho callback: (re)subscribe only when the broker opened a new session."""
        if flags['session present'] == 0:
            # This is a new session, and we need to resubscribe
            self._subscribe()
        elif flags['session present'] == 1:
            pass
        else:
            raise Exception("Unknown Session Present Flag")

    def _subscribe(self):
        """Subscribe to every managed topic filter."""
        # Edge case: This may fail if we disconnect when not subscribed to all channels; there seems to a case where
        # subscribe() returns an error code that we currently do handle.
        # With some luck, the subscription stays in the packet queue.
        # Other defaults are sane, we don't need Subscription Options
        # However, if our session expires (after long-lasting conection loss),
        # we will unexpectedly re-receive all retained messages
        # which is not bad, if they are idempotent
        # We MUST NOT add message callbacks here, otherwise, they may be added twice upon reconnect after session expiry
        for topic_filter, kwargs in self._managed_subsciptions.items():
            self._mqttc.subscribe(topic=topic_filter, **kwargs)

    def _on_disconnect(self, client, userdata, rc: ReasonCodes, properties: Properties = None):
        """Paho callback: intentionally a no-op so automatic reconnect proceeds."""
        # Exceptions here seem to disrupt the automatic reconnect
        # Connection loss can be tested with:
        # sudo tc qdisc add dev lo root netem loss 100%
        # sudo tc qdisc del dev lo root
        pass

    def _on_message(self, client, userdata, message: MQTTMessage):
        """Fallback callback for messages not matching any registered filter.

        Builds debug dicts of the message/properties attributes; the results
        are currently unused (debugging aid).
        """
        message_dict = {attr: getattr(message, attr) for attr in dir(message) if not attr.startswith("_")}
        message_properties: Properties = message.properties
        message_properties_dict = {attr: getattr(message_properties, attr) for attr in dir(message_properties) if
                                   not attr.startswith("_")}

    def _on_log(self, client, userdata, level, buf):
        """Forward paho's internal log lines to this module's logger."""
        log.log(level, buf, extra=dict(userdata=userdata))

    @staticmethod
    def subscribe_decorator(topic, **kwargs):
        """
        This must be the outermost decorator (except for other similar nop-decorators)
        Avoid overlapping subscriptions or handle duplicates.
        Uses the same kwargs as paho.mqtt.client.Client.subscribe()
        Try qos=2 or options=SubscriptionOptions()
        Your function should have the signature func(var1, var2, vars, *, client,userdata,message)
        with a positional variable for each + or # in the pattern
        """
        def _subscribe_decorator(func):
            # Stash (topic, kwargs) on the function; __init__ picks it up.
            setattr(func, _SUBSCRIBE_DECORATOR_NAME, (topic, kwargs))
            # no @wraps
            return func

        return _subscribe_decorator

    def publish(self, topic_pattern, *topic_data, **kwargs):
        """
        :param topic_pattern: A topic pattern, e.g. a/+/c/#
        :param topic_data: some elements matching the pattern, e.g. "b", ("d", "e")
        :param kwargs: Passed to Client.publish(self, topic, payload=None, qos=0, retain=False, properties=None)
        :return:
        """
        topic = pack_topic(topic_pattern, *topic_data)
        self._mqttc.publish(topic, **kwargs)
# Attribute name under which subscribe_decorator stores its (topic, kwargs).
# NOTE(review): the extra `name =` alias looks accidental; kept in case it is
# imported elsewhere — confirm and drop if unused.
_SUBSCRIBE_DECORATOR_NAME = name = __name__ + "." + GenericMqttEndpoint.subscribe_decorator.__qualname__
# Characters that may not appear in data substituted into a topic pattern.
FORBIDDEN_CHARS = "/+#"
def pack_topic(pattern: str, *data):
data = list(data)
while "+" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
element = data.pop(0)
check_data_is_sane(element)
pattern = pattern.replace("+", element, 1)
while "#" in pattern:
if not data:
raise Exception("Placeholder with no value to fill in")
remainder = data.pop(0)
if isinstance(remainder, str):
raise Exception("You should provide a list or a tuple to replace a '#', not a string.")
elements = list(remainder)
for element in elements:
check_data_is_sane(element)
pattern = pattern.replace("#", "/".join(elements), 1)
if data:
raise Exception("Unused placeholders are present")
return pattern
def check_data_is_sane(element):
for FORBIDDEN_CHAR in FORBIDDEN_CHARS:
if FORBIDDEN_CHAR in element:
raise Exception(f"Cannot fill in data containing a '{FORBIDDEN_CHAR}'")
def unpack_topic(pattern, topic):
    """
    Generator matching a concrete *topic* against an MQTT filter *pattern*.

    Yields one string for each "+" in the pattern, followed by a single list of
    the remaining topic components when a trailing "#" is present.  Raises an
    Exception as soon as the topic does not fit the pattern (length mismatch,
    literal-component mismatch, or a malformed wildcard in the pattern).
    """
    pattern_parts = iter(pattern.split("/"))
    topic_parts = iter(topic.split("/"))
    while True:
        # Advance the pattern; exhaustion here means the topic must also end.
        try:
            cur_pattern = next(pattern_parts)
        except StopIteration:
            try:
                cur_topic = next(topic_parts)
                raise Exception("The topic to be matched is longer than the pattern without an # suffix. "
                                "The first unmatched part is {!r}".format(cur_topic))
            except StopIteration:
                # no more elements in both sequences.
                return
        if cur_pattern == "#":
            # "#" swallows the whole remaining topic as one list.
            yield list(topic_parts)
            try:
                cur_pattern = next(pattern_parts)
                raise Exception("The pattern has a component after a #: {!r}".format(cur_pattern))
            except StopIteration:
                # topic has been exhausted by list() enumeration, and pattern is empty, too.
                return
        else:
            try:
                cur_topic = next(topic_parts)
            except StopIteration:
                raise Exception("The topic lacks a component to match a non-#-component in the pattern.")
            else:
                if cur_pattern == "+":
                    # Single-level wildcard: hand the matched component to the caller.
                    yield cur_topic
                elif "+" in cur_pattern:
                    raise Exception(
                        "The single-level wildcard can be used at any level in the Topic Filter, including first and last levels. Where it is used, it MUST occupy an entire level of the filter.")
                elif "#" in cur_pattern:
                    raise Exception(
                        "The multi-level wildcard character MUST be specified either on its own or following a topic level separator. In either case it MUST be the last character specified in the Topic Filter.")
                elif cur_pattern != cur_topic:
                    raise Exception(
                        "The pattern {!r} is no wildcard, and the topic {!r} differs.".format(cur_pattern, cur_topic))
                else:  # pattern == topic and neither contain a # or +
                    # we do not yield return constant non-wildcards.
                    continue
|
19,395 | 650864f41c6350de8813def4411e98a6c2a2ff31 | from alkira import Alkira
def main(q, i, p, params, tags):
    """Actionlet entry point: create a bookmark from params['name'/'url'/'order'].

    Logs the attempt, creates the bookmark through the Alkira client built from
    p.api, and reports success back through the shared params dict.
    """
    q.logger.log('Creating bookmark %s - %s' % (params['name'], params['url']), 1)
    alkira = Alkira(p.api)
    alkira.createBookmark(params['name'], params['url'], params['order'])
    # Signal success to the caller via the mutable params mapping.
    params['result'] = True
def match(q, i, p, params, tags):
    """Guard for the actionlet: applies unconditionally."""
    return True
|
19,396 | 12aa65804a752b92b9153e6c091d89b6066e1a8a | size = [5, 7, 300, 90, 24, 50, 75]
print("Hello, my name is Dieu and these are my sheep sizes: ")
print(size)
input()
# max_size = (max(size))
# print("Now my biggest sheep has size {0} let's shear it. ". format(max_size))
# input()
# x = size.index(max_size)
# size[x] = 8
# print ("After shearing, here is my flock: ")
# print (size)
# input()
loop = True
while loop:
for z in range (3):
a = 1
for i in range (len(size)):
print ("MONTH {0}".format(a))
size[i] += 50
print ("One month has passed, now here is my flock ")
print (size)
input()
max_size = (max(size))
print("Now my biggest sheep has size {0} let's shear it. ". format(max_size))
input()
x = size.index(max_size)
size[x] = 8
print ("After shearing, here is my flock: ")
print (size)
input()
a += 1
size[i] += 50
break
# total = sum(size)
# price = total * 2
# print("My flock has size in total: {0}".format(total))
# print("I would get {0} *$2 = {1}".format(total,price)) |
19,397 | 7ddd6973ac80fc92311796b3275fcd03f6b88f77 | sGenerateLightVersionForDebugging = False
# Flip to True to process only every Nth sample and show debug windows.
# sGenerateLightVersionForDebugging = True
# Root folder of the CMU hand dataset.
sDatasetFolder = "../dataset/hand/"
# Debug visualisation is tied to the light/debug mode.
sDisplayDebugging = sGenerateLightVersionForDebugging
import cv2
import glob
import natsort
import json
from coco_util import *
def split_file(name):
    """Parse an annotation/image filename into (sample_id, person_id, hand_side).

    Two layouts are handled (see the examples below the function):
      * "000015774_01_l.jpg"              -> ("000015774", "01", "l")
      * "Name.flv_000115_l.jpg"           -> ("Name" + "000115", "01", "l")

    :raises ValueError: for any other filename shape.  (The original printed
        "Error" and then hit the undefined name ``stop`` — a NameError used as
        a crash; an explicit exception is clearer and carries the filename.)
    """
    splits = name.split('/')[-1]
    splits = splits.split('.')
    nameid = splits[0]
    if splits[1] == "jpg" or splits[1] == "json":
        parts = splits[0].split('_')
        return (parts[0], parts[1], parts[2])
    elif splits[2] == "jpg" or splits[2] == "json":
        parts = splits[1].split('_')
        # Video-frame layout: fuse the clip name with the frame number and
        # assume a single person id "01".
        return (nameid + parts[1], "01", parts[2])
    raise ValueError("Unrecognized annotation filename: %r" % name)
# 000015774_01_l.jpg
# Alexander_mouse_cat_rooster.flv_000115_l.jpg
def runHands(jsonOutput, imageAndAnnotFolder):
    """Convert CMU hand annotations in *imageAndAnnotFolder* to one COCO-style JSON.

    Pairs every *.jpg with its *.json keypoint file, groups them by sample id /
    person id / hand side, builds COCO "images" and "annotations" entries
    (42 keypoints: 21 left + 21 right, zero-filled when a hand is missing),
    and writes the result to *jsonOutput*.

    NOTE(review): Python 2 only (dict.iteritems, the bare-name `stop` crash
    idiom); hand keypoints are drawn onto the loaded image unconditionally,
    though the image is only shown when sDisplayDebugging is set.
    """
    image_files = natsort.natsorted(glob.glob(imageAndAnnotFolder + "*.jpg"))
    pt_files = natsort.natsorted(glob.glob(imageAndAnnotFolder + "*.json"))
    print("Initial #annotations: " + str(len(image_files)))
    # Consolidate data: hands_dict[sample_id]["persons"][person_id][hand_side]
    hands_dict = dict()
    for image_file, pt_file in zip(image_files, pt_files):
        img_id, pid_ifile, hand_dir = split_file(image_file)
        pt_id, pid_ptfile, pt_dir = split_file(pt_file)
        # Sanity check that the image and keypoint files were paired correctly.
        if (img_id[1] != pt_id[1]) or (pid_ifile[1] != pid_ptfile[1]):
            print("Error")
            stop
        if img_id not in hands_dict:
            hands_dict[img_id] = dict()
            hands_dict[img_id]["persons"] = dict()
            hands_dict[img_id]["image_path"] = image_file
        if pid_ifile not in hands_dict[img_id]["persons"]:
            hands_dict[img_id]["persons"][pid_ifile] = dict()
        if hand_dir not in hands_dict[img_id]["persons"][pid_ifile]:
            hands_dict[img_id]["persons"][pid_ifile][hand_dir] = dict()
            hands_dict[img_id]["persons"][pid_ifile][hand_dir]["pt_path"] = pt_file
    totalWriteCount = len(hands_dict)
    printEveryXIterations = max(1, round(totalWriteCount / 10))
    print("Real #images: " + str(totalWriteCount))
    # Iterate each image
    images = []
    annotations = []
    ii = -1
    pid = -1
    for key, value in hands_dict.iteritems():
        ii+=1
        if ii % printEveryXIterations == 0:
            print('Sample %d of %d' % (ii+1, totalWriteCount))
        elif sGenerateLightVersionForDebugging:
            # Light mode: keep only every printEveryXIterations-th sample.
            continue
        #print(key)
        #if key != "000154": continue
        image_path = value["image_path"]
        # Image
        img = None
        img = cv2.imread(image_path)
        img_width = img.shape[1]
        img_height = img.shape[0]
        # Image Object (COCO "images" entry)
        image_object = dict()
        image_object["id"] = ii
        image_object["file_name"] = image_path.split("/")[-1]
        image_object["width"] = img_width
        image_object["height"] = img_height
        images.append(image_object)
        # Anno object
        person_array = []
        # anno = dict()
        # anno["image_path"] = image_path
        # anno["image_id"] = ii
        # anno["annorect"] = []
        # add = True
        # Iterate each person
        for person_key, person_value in value["persons"].iteritems():
            pid+=1
            # Load keypoints; body points and head box feed the bounding box only.
            mpii_body_pts = []
            left_hand_pts = []
            if "l" in person_value:
                left_hand_pts = load_json(person_value["l"]["pt_path"])["hand_pts"]
                mpii_body_pts.extend(load_json(person_value["l"]["pt_path"])["mpii_body_pts"])
                mpii_body_pts.extend(load_json(person_value["l"]["pt_path"])["head_box"])
                mpii_body_pts.extend(left_hand_pts)
            right_hand_pts = []
            if "r" in person_value:
                right_hand_pts = load_json(person_value["r"]["pt_path"])["hand_pts"]
                mpii_body_pts.extend(load_json(person_value["r"]["pt_path"])["mpii_body_pts"])
                mpii_body_pts.extend(load_json(person_value["r"]["pt_path"])["head_box"])
                mpii_body_pts.extend(right_hand_pts)
            # Populate with fake (invisible) points if a hand is not annotated.
            if len(left_hand_pts) == 0:
                for i in range(0, 21): left_hand_pts.append([0,0,0])
            if len(right_hand_pts) == 0:
                for i in range(0, 21): right_hand_pts.append([0,0,0])
            # Check if 21 keypoints per hand.
            if len(left_hand_pts) != 21 or len(right_hand_pts) != 21:
                print("Error")
                stop
            # Draw (left hand red, right hand green; skips invisible points)
            # for pt in mpii_body_pts:
            #     if len(pt) == 3:
            #         if pt[2] == 0: continue
            #         cv2.circle(img, (int(pt[0]), int(pt[1])), 2, (255,0,0), -1)
            for pt in left_hand_pts:
                if pt[2] == 0: continue
                cv2.circle(img, (int(pt[0]), int(pt[1])), 2, (0,0,255), -1)
            for pt in right_hand_pts:
                if pt[2] == 0: continue
                cv2.circle(img, (int(pt[0]), int(pt[1])), 2, (0,255,0), -1)
            # Mix together: 42 points, left hand first.
            all_points = left_hand_pts
            all_points.extend(right_hand_pts)
            # Convert into flat COCO keypoint triples [x, y, visibility, ...].
            caffe_points = []
            for point in all_points:
                caffe_points.append(point[0])
                caffe_points.append(point[1])
                caffe_points.append(point[2])
            # Get rectangle enclosing body + head + hand points, padded by 10%.
            rect = get_rect_from_points_only_bigger(mpii_body_pts, img_width, img_height, 0.1)
            rectW = rect[2]-rect[0]
            rectH = rect[3]-rect[1]
            # Display - Rectangle
            if sDisplayDebugging:
                cv2.rectangle(img, (int(rect[0]), int(rect[1])), (int(rect[2]), int(rect[3])), 255, 2)
            # Store Person Data (COCO "annotations" entry)
            data = dict()
            data["segmentation"] = [] # DONT HAVE
            data["num_keypoints"] = len(all_points)
            data["img_path"] = image_path.split("/")[-1]
            data["bbox"] = [rect[0], rect[1], rectW, rectH]
            data["area"] = data["bbox"][2]*data["bbox"][3]
            data["iscrowd"] = 0
            data["keypoints"] = caffe_points
            data["img_width"] = img_width
            data["img_height"] = img_height
            data["category_id"] = 1
            data["image_id"] = ii
            data["id"] = pid
            person_array.append(data)
        # Append Annot
        for arr in person_array:
            annotations.append(arr)
        # Display
        if sDisplayDebugging:
            show_image(img)
            cv2.waitKey(-1)
    # Json Object (top-level COCO structure)
    json_object = dict()
    json_object["info"] = dict()
    json_object["info"]["version"] = 1.0
    json_object["info"]["description"] = "Hands MPII Dataset in COCO Json Format"
    json_object["licenses"] = []
    json_object["images"] = images
    json_object["annotations"] = annotations
    # JSON writing
    print("Saving " + jsonOutput + "...")
    print("Final #Images: " + str(len(json_object["images"])))
    print("Final #Annotations: " + str(len(json_object["annotations"])))
    open(jsonOutput, 'w').close()
    with open(jsonOutput, 'w') as outfile:
        json.dump(json_object, outfile)
    print("Saved!")
# Test split: convert the manually-annotated test images.
sImageAndAnnotFolder = sDatasetFolder + "hand_labels/manual_test/"
sJsonOutput = sDatasetFolder + 'json/hand42_mpii_test.json'
runHands(sJsonOutput, sImageAndAnnotFolder)
print(' ')
# Train split: convert the manually-annotated training images.
sImageAndAnnotFolder = sDatasetFolder + "hand_labels/manual_train/"
sJsonOutput = sDatasetFolder + 'json/hand42_mpii_train.json'
runHands(sJsonOutput, sImageAndAnnotFolder)
|
19,398 | 81de966a5dcd6f63725a11335afe3e09940f377a | import os
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from ConvNet.config.defaults import get_cfg_defaults
from sklearn.decomposition import PCA
from ConvNet.tools.deal_with_obj import loadObj
# if np.any(np.isnan(input.cpu().numpy())):  # check whether the input data contains NaN
#     print('Input data has NaN!')
#
# if np.isnan(loss.item()):  # check whether the loss value is NaN
#     print('Loss value is NaN!')
if __name__ == '__main__':
    # Run PCA over a directory of .obj mesh vertex sets and visualise the
    # first three principal components in 3D.
    cfg = get_cfg_defaults()
    image_path = cfg.INPUT.SAVE_RESIZE_IMAGES
    # net = CNN()
    # optimizer = torch.optim.Adam(net.parameters(), lr=cfg.SOLVER.BASE_LR)
    # net.load_state_dict(torch.load(cfg.OUTPUT.PARAMETER + cfg.OUTPUT.SAVE_NET_FILENAME))
    # print('loaded net successfully!')
    # optimizer.load_state_dict(torch.load(cfg.OUTPUT.PARAMETER + cfg.OUTPUT.SAVE_OPTIMIZER_FILENAME))
    # print('loaded optimizer successfully!')
    # loader = DataLoader(DataSet(image_path), batch_size=cfg.INPUT.BATCH_SIZE, shuffle=True)
    # for i, (images, labels) in enumerate(loader, start=1):
    input_path = cfg.INPUT.VERTICS_PATH
    file_list = os.listdir(input_path)
    # Sort by filename length — presumably a proxy for numeric order of
    # names like "1.obj" ... "10.obj"; ties keep os.listdir order. Verify.
    file_list.sort(key=lambda x: len(x))
    data = []
    for i in range(len(file_list)):
        print(i, 'finished')
        # loadObj returns (vertices, faces); each mesh is assumed to have
        # exactly 7657 vertices — reshape raises otherwise.
        vertics, faces = loadObj(input_path + file_list[i])
        data.append(np.array(vertics).reshape((7657, 3)))
    data = np.array(data)
    # Flatten each mesh to a 22971-vector and transpose so columns are
    # samples. NOTE(review): reshape expects len(file_list)-4000 samples but
    # the loop above appended len(file_list) meshes — this only works if
    # exactly 4000 extra entries are accounted for elsewhere. Confirm.
    data = data.reshape(len(file_list)-4000, 3*7657).T
    fig = plt.figure()
    ax = Axes3D(fig)
    pca = PCA(n_components=826)
    pca_feature = pca.fit_transform(data)
    # Scatter the first three principal components.
    ax.scatter(pca_feature[0, :], pca_feature[1, :], pca_feature[2, :], alpha=0.9, edgecolors='white')
    # feature_info[j] = cumulative explained-variance ratio of the first j
    # components (feature_info[0] is 0 because the slice [0:0] is empty).
    feature_info = np.zeros(826)
    for j in range(826):
        feature_info[j] = sum(pca.explained_variance_ratio_[0:j])
    # Reconstruct the data from its PCA projection (result currently unused).
    A = pca.inverse_transform(pca_feature)
# def TensorBoard(tb, images, loss, i):
# grid = torchvision.utils.make_grid(images)
# tb.add_image('Image', grid, 0)
# tb.add_graph(net, images)
# tb.add_scalar('Loss', loss, i)
# tb.add_histogram('conv1.weight', net.conv_net.conv1.weight)
# tb.add_histogram('bn2.weight', net.conv_net.bn2.weight)
# tb.add_histogram('bn2.bias', net.conv_net.bn2.bias)
# tb.add_histogram('conv3.weight', net.conv_net.conv3.weight)
# tb.add_histogram('conv3.bias', net.conv_net.conv3.bias)
# tb.add_histogram('bn4.weight', net.conv_net.bn4.weight)
# tb.add_histogram('bn4.bias', net.conv_net.bn4.bias)
# tb.add_histogram('conv5.weight', net.conv_net.conv5.weight)
# tb.add_histogram('conv5.bias', net.conv_net.conv5.bias)
# tb.add_histogram('bn6.weight', net.conv_net.bn6.weight)
# tb.add_histogram('bn6.bias', net.conv_net.bn6.bias)
# tb.add_histogram('conv7.weight', net.conv_net.conv7.weight)
# tb.add_histogram('conv7.bias', net.conv_net.conv7.bias)
# tb.add_histogram('bn8.weight', net.conv_net.bn8.weight)
# tb.add_histogram('bn8.bias', net.conv_net.bn8.bias)
# tb.add_histogram('conv9.weight', net.conv_net.conv9.weight)
# tb.add_histogram('conv9.bias', net.conv_net.conv9.bias)
# tb.add_histogram('bn10.weight', net.conv_net.bn10.weight)
# tb.add_histogram('bn10.bias', net.conv_net.bn10.bias)
# tb.add_histogram('conv11.weight', net.conv_net.conv11.weight)
# tb.add_histogram('conv11.bias', net.conv_net.conv11.bias)
# tb.add_histogram('bn12.weight', net.conv_net.bn12.weight)
# tb.add_histogram('bn12.bias', net.conv_net.bn12.bias)
# tb.add_histogram('fc.weight', net.fc.fc.weight)
# tb.add_histogram('fc.bias', net.fc.fc.bias)
# # tensorboard --logdir=./
# def TensorBoard(images, loss, i):
# grid = torchvision.utils.make_grid(images)
# tb.add_image('Image', grid, 0)
# tb.add_graph(net, images)
# tb.add_scalar('Loss', loss, i)
# tb.add_histogram('conv.weight', net.conv.weight)
# tb.add_histogram('bn1.weight', net.bn1.weight)
# tb.add_histogram('bn1.bias', net.bn1.bias)
# tb.add_histogram('fc1.weight', net.fc1.weight)
# tb.add_histogram('fc1.bias', net.fc1.bias)
# # tensorboard --logdir=./
|
19,399 | 4cbd5781cca90fe8dd207318fb7303a457bff75b | #from IPython.display import clear_output
def display_board(board):
    """Print the 3x3 board; cells 7-9 are the top row, 1-3 the bottom.

    Index 0 of *board* is unused so cell numbers map directly to the
    numeric keypad layout.
    """
    divider = '---------------'
    rows = [(7, 8, 9), (4, 5, 6), (1, 2, 3)]
    rendered = [' {} | {} | {}'.format(board[a], board[b], board[c])
                for a, b, c in rows]
    print(rendered[0])
    print(divider)
    print(rendered[1])
    print(divider)
    print(rendered[2])
def player_input():
    """Ask Player 1 to pick a marker and return (player1, player2) markers.

    Re-prompts until 'X' or 'O' (case-insensitive) is entered.

    Returns:
        tuple[str, str]: ('X', 'O') or ('O', 'X') — Player 1's choice first.
    """
    marker = ''
    while not (marker == 'X' or marker == 'O'):
        # BUG FIX: raw_input() is Python-2-only and raises NameError on
        # Python 3, which the rest of this file (print() calls) targets.
        marker = input('Player 1: Do you want to be X or O? ').upper()
    if marker == 'X':
        return ('X', 'O')
    else:
        return ('O', 'X')
def place_marker(board, marker, position):
    """Write *marker* into *board* at cell *position*, mutating in place."""
    board[position] = marker
def win_check(board, mark):
    """Return True if *mark* fills any row, column, or diagonal of *board*."""
    winning_lines = (
        (1, 2, 3), (4, 5, 6), (7, 8, 9),   # horizontal rows
        (1, 4, 7), (2, 5, 8), (3, 6, 9),   # vertical columns
        (3, 5, 7), (1, 5, 9),              # diagonals
    )
    return any(all(board[cell] == mark for cell in line)
               for line in winning_lines)
import random
def choose_first():
if random.randint(0,1) == 0:
return "Player 1"
else:
return "Player 2"
def space_check(board, position):
    """Return True when the cell at *position* is still unmarked."""
    return ' ' == board[position]
def full_board_check(board):
    """Return True when no cell 1-9 of *board* is free."""
    return all(board[cell] != ' ' for cell in range(1, 10))
def player_choice(board):
    """Prompt for a free cell and return it, or the tie sentinel.

    Re-prompts on out-of-range numbers and already-taken cells.

    Returns:
        int: a free position 1-9, or 500 when the board is already full
        (a tie) — callers treat 500 as "stop the game".
    """
    position = 0
    if full_board_check(board):
        print("It's a tie!")
        position = 500
        return position
    while position not in [1, 2, 3, 4, 5, 6, 7, 8, 9] or not space_check(board, position):
        # BUG FIX: raw_input() is Python-2-only (NameError on Python 3), and
        # the bare int() crashed with ValueError on non-numeric entries —
        # now invalid text simply re-prompts.
        try:
            position = int(input('Choose your next position: (1-9) '))
        except ValueError:
            position = 0
        if full_board_check(board):
            print("It's a tie!")
            position = 500
            break
    return position
def replay():
    """Ask whether to play again; return True only for an exact 'y'."""
    # BUG FIX: raw_input() is Python-2-only and raises NameError on
    # Python 3, which the rest of this file (print() calls) targets.
    response = input("Do you want to play again? Enter y or n: ")
    return response == 'y'
# --- Main game loop ----------------------------------------------------------
print('Welcome to Tic Tac Toe!')
while True:
    # Set the game up: index 0 of the board is unused so that cell numbers
    # map directly to the 1-9 keypad layout.
    board = [' '] * 10
    player1, player2 = player_input()
    # Randomly choose who goes first.
    turn = choose_first()
    print(turn + ' will go first.')
    # BUG FIX: raw_input() is Python-2-only (NameError on Python 3).
    ready = input('Are you ready ' + turn + '? Enter y or n: ')
    if ready == 'y':
        game_on = True
    else:
        game_on = False
    while game_on:
        # Player 1's turn
        if 'Player 1' == turn:
            display_board(board)
            # Pick a position; 500 signals a full board (tie).
            position = player_choice(board)
            if position == 500:
                break
            # Check if the position is available
            result = space_check(board, position)
            # If it is available, mark the position
            if result:
                place_marker(board, player1, position)
                # Check if Player 1 has won
                didyouwin = win_check(board, player1)
                if didyouwin:
                    display_board(board)
                    print("Congrats Player 1, you won!")
                    break
            # No win yet: hand over to Player 2.
            print("Now it's Player 2's turn!")
            # BUG FIX: was 'Player 2 ' (trailing space) — harmless only
            # because turn is compared solely against 'Player 1'.
            turn = 'Player 2'
        else:
            display_board(board)
            # Pick a position; 500 signals a full board (tie).
            position = player_choice(board)
            if position == 500:
                break
            # Check if the position is available
            result = space_check(board, position)
            # If it is available, mark the position
            if result:
                place_marker(board, player2, position)
                # Check if Player 2 has won (comment previously said Player 1)
                didyouwin = win_check(board, player2)
                if didyouwin:
                    display_board(board)
                    print("Congrats Player 2, you won!")
                    break
            # No win yet: hand back to Player 1.
            print("Now it's Player 1's turn!")
            turn = 'Player 1'
    if not replay():
        break
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.