blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M ⌀ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
96951c68078c5d34e2f175d5c5346d8fa2e87015 | a1a06b5cb70c2d9a7ee3869733035f8a16c6cd84 | /average_perceptron.py | dff628937b854af3a1414be6d9ef053cb068bfd3 | [] | no_license | VineethRavi/Average-Vanilla-Perceptrons-From-scratch-MNIST | 863e311bd8ccec02d8c3e6bc9b2f5b7fc7595c70 | ce72cb636764ad1745ab6e440db894e0fbf6c4a0 | refs/heads/master | 2020-04-01T22:11:20.659147 | 2018-10-18T22:48:56 | 2018-10-18T22:48:56 | 153,696,192 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 13,496 | py |
"""
Importing the Neccessary Libraries for hw2
Average Perceptron
"""
import struct
import numpy as np
import time
import sys
import csv
import gzip
#import matplotlib.pyplot as plt
# Remove above comment, if you want to generate the Plots required for the learning curves
#start = time.time()
"""
Reading the Data Directly from the compressed .gz files
The input Folder name is passed as final arugment from the command line - filename
"""
def read_data(filename,File):
    """Load one IDX-format array (MNIST style) from a gzipped file.

    Parameters
    ----------
    filename : str  directory that holds the archive (passed on the CLI)
    File     : str  archive file name inside that directory

    Returns a writable numpy uint8 array shaped per the IDX header.
    """
    with gzip.open(filename+'/'+File) as f:
        # IDX header: two zero bytes, a dtype code byte, then the dim count
        zero, data_type, dims = struct.unpack('>HBB', f.read(4))
        # one big-endian uint32 per dimension gives the array shape
        shape = tuple(struct.unpack('>I', f.read(4))[0] for d in range(dims))
        # np.fromstring is deprecated (and removed for binary input in new
        # NumPy); np.frombuffer is the supported replacement.  .copy() keeps
        # the result writable -- a raw frombuffer view is read-only, while
        # fromstring returned a fresh writable array.
        return np.frombuffer(f.read(), dtype=np.uint8).reshape(shape).copy()
"""
The Confusion Matrix, function , which is used for computing the F1 Scores
"""
def CM(Y_pred,Y_true):
    """Build an 11x11 confusion matrix and derive macro-F1 and accuracy.

    Y_pred / Y_true are equal-length sequences of digit labels.
    Returns (accuracy, macro_F1); macro-F1 averages per-class F1 over the
    number of distinct true labels actually present.
    """
    conf = np.zeros((11, 11))
    # tally one count per (true, predicted) pair
    for truth, guess in zip(Y_true, Y_pred):
        conf[int(truth)][int(guess)] += 1
    # per-class counts: diagonal = true positives, row/column residues
    # give false negatives / false positives respectively
    tp = np.diag(conf)
    fn = conf.sum(axis=1) - tp
    fp = conf.sum(axis=0) - tp
    f1 = np.zeros(11)
    for k in range(11):
        if tp[k] == 0:
            f1[k] = 0
        else:
            f1[k] = 2 * tp[k] / float(2 * tp[k] + fp[k] + fn[k])
    macro_f1 = float(np.sum(f1)) / (len(np.unique(Y_true)))   # MACRO F1 score
    accuracy = float(np.sum(tp)) / (len(Y_pred))              # overall accuracy
    return accuracy, macro_f1
"""
Making Predictions for the digit type based on the argmax value of W^T.X
Implemented using the Perceptron Algorithm
"""
def predict(w,x):
    """Score sample x against every perceptron row of w and return the
    index (class) of the highest activation.  The last entry of x is the
    appended label and is excluded from the dot product."""
    scores = np.matmul(w, x[:-1])
    return np.argmax(scores)
"""
Testing The Perceptron Algorithm
"""
def test_perceptron(test_data,test_label,w):
    """Evaluate trained weights w on the labelled test set.

    Returns (F1 score, accuracy) as computed by CM().
    """
    # glue the labels onto the samples so each row carries its truth
    labelled = np.c_[test_data, test_label]
    truths, guesses = [], []
    for row in labelled:
        guesses.append(predict(w, row))   # model's class for this sample
        truths.append(row[-1])            # actual class label
    accuracy, f1 = CM(guesses, truths)
    return f1, accuracy
"""
Training The Perceptron Algorithm
Computing the weight vectors for the 10 perceptrons
"""
def train_perceptron(train_data,train_label,N_train,N_epoch,N_learn_rate):
    """Train 10 one-vs-all perceptrons and return the *averaged* weights.

    Returns (averaged weights, training F1 score, training accuracy).
    """
    # keep only the requested number of samples, with labels appended
    samples = np.c_[train_data[0:N_train], train_label[0:N_train]]
    """
    Weight initialisation: pick ONE of the two lines below.
    Random in (-1, 1) is active; zero init (often scoring higher here)
    is left commented out.
    """
    w = 2*np.random.rand(10,785)-1      # random init in (-1, 1)
    #w = np.zeros((10,785))             # zero init alternative
    avg_w = np.zeros((10, 785))         # running sum for the averaged perceptron
    truths, guesses = [], []
    for _ in range(N_epoch):
        np.random.shuffle(samples)      # reshuffle every epoch
        for row in samples:
            guess = predict(w, row)     # predicted digit
            truth_val = row[-1]         # true digit
            if guess != int(truth_val):
                # reward the correct class...
                w[int(truth_val)] = w[int(truth_val)] + N_learn_rate * row[:-1]
                # ...and penalise every other class still scoring >= 0
                scores = np.matmul(w, row[:-1])
                for cls in range(len(scores)):
                    if cls != int(truth_val) and scores[cls] >= 0:
                        w[cls] = w[cls] - N_learn_rate * row[:-1]
            # ONE VS ALL APPROACH
            truths.append(truth_val)
            guesses.append(guess)
        avg_w = avg_w + w               # accumulate per-epoch weights
    accuracy, f1 = CM(guesses, truths)
    return avg_w, f1, accuracy
"""
Effect of Number of Epoch in Learning
Generating the learning curves
"""
def number_epoch():
    """Sweep the epoch count (10..100 step 5) and plot learning curves.

    NOTE(review): reads module-level globals (train_data, test_data,
    N_train, N_learn_rate, ...) and uses plt, whose import is commented
    out at the top of the file -- re-enable it before calling, or this
    raises NameError.
    """
    N_ep=np.zeros(19)
    F1_tr=np.zeros(19)
    F1_te=np.zeros(19)
    Acc_tr=np.zeros(19)
    Acc_te=np.zeros(19)
    for i in range(0,19): # Plotting the F1 Score, Accuracy Learning curves vs Number of epochs
        N_epoch=10+i*5 # Varying the number of epochs
        N_ep[i]=N_epoch
        print("The Number of training examples is :- %d ." % (N_train))
        print("The Number of epochs is :- %d ." % (N_epoch))
        print("The Learning Rate :- %f ." % (N_learn_rate))
        w,F1_tr[i],Acc_tr[i]=train_perceptron(train_data,train_label,N_train,N_epoch,N_learn_rate)
        F1_te[i],Acc_te[i]=test_perceptron(test_data,test_label,w)
    # figure 1 collects accuracy curves, figure 2 the F1 curves
    plt.figure(1)
    plt.plot(N_ep,Acc_tr, label = "Training Accuracy Score")
    plt.figure(2)
    plt.plot(N_ep,F1_tr, label = "Training F1 Score")
    plt.figure(1)
    plt.plot(N_ep,Acc_te, label = "Test Accuracy Score")
    plt.figure(2)
    plt.plot(N_ep,F1_te, label = "Test F1 Score")
    plt.figure(1)
    plt.xlabel('Number of Epochs')
    # naming the y axis
    plt.ylabel('Accuracy')
    # giving a title to my graph
    plt.title('Accuracy vs Epochs')
    # show a legend on the plot
    plt.legend()
    # save the accuracy plot to disk
    plt.savefig('Accuracy_Epoch.png')
    plt.figure(2)
    plt.xlabel('Number of Epochs')
    # naming the y axis
    plt.ylabel('F1 Score')
    # giving a title to my graph
    plt.title('F1 Scores vs Epochs')
    # show a legend on the plot
    plt.legend()
    # save the F1 plot to disk
    plt.savefig('F1_Score_Epoch.png')
"""
Effect of Size of Training Set in Learning
Generating the learning curves
"""
def training_set_size():
    """Sweep the training-set size (500..10000 step 250) and plot curves.

    NOTE(review): like number_epoch(), depends on module-level globals and
    on plt, whose import is commented out at the top of the file.
    """
    N_tr=np.zeros(39)
    F1_tr=np.zeros(39)
    F1_te=np.zeros(39)
    Acc_tr=np.zeros(39)
    Acc_te=np.zeros(39)
    for i in range(0,39): # Plotting the F1 Score, Accuracy Learning curves vs Training Example Size
        N_train=500+(i*250) # Varying the Number of training examples size
        N_tr[i]=N_train
        print("The Number of training examples is :- %d ." % (N_train))
        print("The Number of epochs is :- %d ." % (N_epoch))
        print("The Learning Rate :- %f ." % (N_learn_rate))
        w,F1_tr[i],Acc_tr[i]=train_perceptron(train_data,train_label,N_train,N_epoch,N_learn_rate)
        F1_te[i],Acc_te[i]=test_perceptron(test_data,test_label,w)
    # figure 3 collects accuracy curves, figure 4 the F1 curves
    plt.figure(3)
    plt.plot(N_tr,Acc_tr, label = "Training Accuracy Score")
    plt.figure(4)
    plt.plot(N_tr,F1_tr, label = "Training F1 Score")
    plt.figure(3)
    plt.plot(N_tr,Acc_te, label = "Test Accuracy Score")
    plt.figure(4)
    plt.plot(N_tr,F1_te, label = "Test F1 Score")
    plt.figure(3)
    plt.xlabel('Number of Training Examples')
    # naming the y axis
    plt.ylabel('Accuracy')
    # giving a title to my graph
    plt.title('Accuracy vs Number of Training Examples')
    # show a legend on the plot
    plt.legend()
    # save the accuracy plot to disk
    plt.savefig('Accuracy_Trainsize.png')
    plt.figure(4)
    plt.xlabel('Number of Training Examples')
    # naming the y axis
    plt.ylabel('F1 Score')
    # giving a title to my graph
    plt.title('F1 Scores vs Number of Training Examples')
    # show a legend on the plot
    plt.legend()
    # save the F1 plot to disk
    plt.savefig('F1_Score_Trainsize.png')
"""
Effect of Learning Rate
Generating the learning curves
"""
def learn_rate():
    """Sweep the learning rate (1e-4 .. 1e-1, decades) and plot curves
    on a log-x axis.

    NOTE(review): depends on module-level globals and on plt, whose
    import is commented out at the top of the file.
    """
    N_lr=np.zeros(4)
    F1_tr=np.zeros(4)
    F1_te=np.zeros(4)
    Acc_tr=np.zeros(4)
    Acc_te=np.zeros(4)
    for i in range(0,4): # Plotting the F1 Score, Accuracy Learning curves vs Learning Rate
        N_learn_rate=0.00001*(10**(i+1)) # Varying the Learning Rate
        N_lr[i]=N_learn_rate
        print("The Number of training examples is :- %d ." % (N_train))
        print("The Number of epochs is :- %d ." % (N_epoch))
        print("The Learning Rate :- %f ." % (N_learn_rate))
        w,F1_tr[i],Acc_tr[i]=train_perceptron(train_data,train_label,N_train,N_epoch,N_learn_rate)
        F1_te[i],Acc_te[i]=test_perceptron(test_data,test_label,w)
    # figure 5 collects accuracy curves, figure 6 the F1 curves
    plt.figure(5)
    plt.semilogx(N_lr,Acc_tr,'bo-',label = "Training Accuracy Score")
    plt.figure(6)
    plt.semilogx(N_lr,F1_tr,'bo-', label = "Training F1 Score")
    plt.figure(5)
    plt.semilogx(N_lr,Acc_te,'ro-', label = "Test Accuracy Score")
    plt.figure(6)
    plt.semilogx(N_lr,F1_te,'ro-', label = "Test F1 Score")
    plt.figure(5)
    plt.xlabel('Learning Rate')
    # naming the y axis
    plt.ylabel('Accuracy')
    # giving a title to my graph
    plt.title('Accuracy vs Learning Rate')
    # show a legend on the plot
    plt.legend()
    # save the accuracy plot to disk
    plt.savefig('Accuracy_learn.png')
    plt.figure(6)
    plt.xlabel('Learning Rate')
    # naming the y axis
    plt.ylabel('F1 Score')
    # giving a title to my graph
    plt.title('F1 Scores vs Learning Rate')
    # show a legend on the plot
    plt.legend()
    # save the F1 plot to disk
    plt.savefig('F1_Score_learn.png')
"""
Reading the Hyperparameters for the Perceptron from the command line
"""
# CLI: <N_train> <N_epoch> <learning_rate> <mnist_dir>
arg=sys.argv
N_train=int(arg[1])
N_epoch=int(arg[2])
N_learn_rate=float(arg[3])
filename=arg[4]
# Load the four standard MNIST gzip archives from the given directory
test_data=read_data(filename,"t10k-images-idx3-ubyte.gz")
test_label=read_data(filename,"t10k-labels-idx1-ubyte.gz")
train_data=read_data(filename,"train-images-idx3-ubyte.gz")
train_label=read_data(filename,"train-labels-idx1-ubyte.gz")
"""
Pre-Processing the Data Set
"""
train_data=train_data[0:10000] # Taking the first 10000 training examples
train_label=train_label[0:10000]
train_data=train_data/255.0 # Dividing by the gray scale max threshold value
train_data=(train_data>=0.5) # Converting to binary feature values
train_data=train_data.astype('int')
test_data=test_data/255.0 # Dividing by the gray scale max threshold value
test_data=(test_data>=0.5) # Converting to binary feature values
test_data=test_data.astype('int')
train_data=train_data.reshape((10000,784)) # Reshaping 28x28 image vector to 784x1 feature input
test_data=test_data.reshape((10000,784)) # Reshaping 28x28 image vector to 784x1 feature input
train_data=np.c_[train_data,np.ones((10000,1))] # Adding the bias term , with feature value as 1 to every instance
test_data=np.c_[test_data,np.ones((10000,1))] # Adding the bias term , with feature value as 1 to every instance
"""
Functions for Plotting the Learning Curves
Remove comments for the below lines of code to generate the learning curves
N_train=10000
N_epoch=50
N_learn_rate=0.001
training_set_size()
N_train=10000
N_epoch=50
N_learn_rate=0.001
number_epoch()
N_train=10000
N_epoch=50
N_learn_rate=0.001
learn_rate()
"""
# The hyperparameters used for the perceptron algorithm (re-read from argv)
N_train=int(arg[1])
N_epoch=int(arg[2])
N_learn_rate=float(arg[3])
# Training the Perceptron Algorithm for computing the weight vectors
w,Train_F1Score,Train_Accuracy=train_perceptron(train_data,train_label,N_train,N_epoch,N_learn_rate)
# Testing the Perceptron Algorithm using the test data
Test_F1Score,Test_Accuracy=test_perceptron(test_data,test_label,w)
print("Training F1 Score: %f " %(Train_F1Score))
#print("Training: F1 Score: %f , Accuracy: %f" %(Train_F1Score,Train_Accuracy))
print("Test F1 Score: %f " %(Test_F1Score))
#print("Test: F1 Score: %f , Accuracy: %f" %(Test_F1Score,Test_Accuracy))
# Timing Metrics (the matching `start` assignment is commented out near the top)
end = time.time()
#print("The time taken for the algorithm computation is :- %f seconds." % (end-start)) | [
"42988138+VineethRavi@users.noreply.github.com"
] | 42988138+VineethRavi@users.noreply.github.com |
ecc3644786a8279c6b15d81b752c0f789844dc8c | 6cb1f283cee25b320e01602d5fd9fa79f7e4793a | /HW6.py | f1a083c5d0afcb7050d03586fda0d928b8afb901 | [] | no_license | landuan/HPM573S18_DUAN_HW6 | ae014cc5c1ee4c8272679e34f6fd079a553983a9 | 8a5e378536f09e9ff215d5f90ecee1d4a4c110b2 | refs/heads/master | 2020-04-06T07:08:48.068341 | 2018-03-07T04:44:38 | 2018-03-07T04:44:38 | 124,180,321 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,679 | py | import scipy.stats as stato
import StatisticalClasses as Stat
import random
class Game:
    """One round of the coin game: pay 250 up front, flip a fair coin 20
    times, and collect 100 for every T,T,H run (TTH runs can never
    overlap, so counting every window is safe)."""
    def __init__(self, id):
        self.id = id
    def simulation(self):
        """Play one game and return the net payout."""
        # 20 independent fair flips
        flips = [random.choice(["H", "T"]) for _ in range(20)]
        payout = -250
        # scan every 3-flip window for the winning T,T,H pattern
        for start in range(18):
            if flips[start:start + 3] == ["T", "T", "H"]:
                payout += 100
        return payout
class Cohort:
    """A batch of Game instances simulated together.

    total_score collects one payout per game; _sumSTAT wraps that same
    list so its statistics reflect whatever has been appended so far.
    """
    def __init__(self, id, pop_size):
        self.total_score = []
        self._sumSTAT = Stat.SummaryStat('Gamblers total score', self.total_score)
        # game ids are unique across cohorts: id * pop_size + 1 .. + pop_size
        self.step = [Game(id * pop_size + k) for k in range(1, pop_size + 1)]
    def simulatecohort(self):
        # play every game once and record its payout
        self.total_score.extend(g.simulation() for g in self.step)
    def get_expected_score(self):
        # mean payout over the cohort
        return sum(self.total_score)/len(self.total_score)
    def get_CI(self, alpha):
        # delegate the t-based confidence interval to the stats helper
        return self._sumSTAT.get_t_CI(alpha)
class MultiCohort:
    """Runs one Cohort per id and keeps each cohort's mean payout."""
    def __init__(self, ids, pop_sizes):
        self._ids = ids
        self._popsizes = pop_sizes
        self._get_all_rewards = []
    def simulate(self):
        # one fresh cohort per id; record its expected (mean) score
        for idx, _ in enumerate(self._ids):
            batch = Cohort(idx, self._popsizes)
            batch.simulatecohort()
            self._get_all_rewards.append(batch.get_expected_score())
def proportion_CI(p,n,alpha):
    """Two-sided (1-alpha) confidence interval for a sample proportion.

    p     : observed proportion
    n     : number of trials
    alpha : significance level
    Returns [lower, upper].
    NOTE(review): uses a t quantile with n (not n-1) degrees of freedom --
    confirm that is intended.
    """
    se = (p * (1 - p)) ** 0.5 / n ** 0.5
    half = stato.t.ppf(1 - alpha / 2, n) * se
    return [p - half, p + half]
# Run one 1000-game cohort and report CIs for the homework questions.
alpha = 0.05
test = Cohort(2,1000)
test.simulatecohort()
stat = Stat.SummaryStat('Gamblers total score', test.total_score)
ExpectedCI=stat.get_t_CI(alpha)
print("the 95% CI of the expected reward is", ExpectedCI)
# Count games that lost money to estimate the loss probability
count = 0
for i in range(0,len(test.total_score)):
    if test.total_score[i]<0:
        count+=1
    else:
        count+=0
probability = count/float(len(test.total_score))
CIofProb=proportion_CI(probability,len(test.total_score),alpha)
print("95% CI is ", CIofProb)
# Q2
print("the expected reward means that if we stimulate the game for many times and a confidence interval is received each time, 95% of the interval will cover true means.")
# Q3:
print("for casino owner, "
      "he/she should consider long-term profit"
      "so the true expected reward of the game should be concerned. "
      "Therefore, I suggest the CI of rewards and probability.")
print("the 95 % CI of expected reward is",ExpectedCI,
      "the 95% CI of expected rewards means "
      "that if the game is stimulated for many times"
      "a CI is received each time, 95% of these intervals will cover true mean.")
print("95 % CI of probability is", CIofProb,
      "95% CI of probability means that "
      "if the game is repeated for many times"
      "a confidence interval of probability is received each time,"
      "95% of these intervals will cover true probability of loss).")
# Gambler's view: 1000 simulated 10-game cohorts
number_of_simulaiton=1000
gambler_try=MultiCohort(range(number_of_simulaiton),10)
gambler_try.simulate()
# NOTE(review): this binds the SummaryStat *class*, not an instance, and is
# never used afterwards -- looks like dead code; verify.
sum_of_statpi=Stat.SummaryStat
# NOTE(review): PI is taken from `stat` (the single 1000-game cohort), not
# from the MultiCohort results gathered above -- confirm intended.
expected_reward_gambler=stat.get_PI(alpha)
print(expected_reward_gambler)
print("This means that there are 95% probability "
      "that your expected reward in next 10-game lies in", expected_reward_gambler)
| [
"lan.duan@yale.edu"
] | lan.duan@yale.edu |
08f63f7ce0f8c721c93fabb907980339ae745dcf | 400e971259221851dcfa8d367ac4814c6799dbd7 | /languages/cookbook/tour/python.py | e82a026dfa230733f8c035446019bab2190aeb87 | [] | no_license | jayramrout/learning | ec5db84b575086446f14d1e8b4837a6c64a9d054 | 21d27bbe8cad7c2fd1bdad15bbba24707e122d24 | refs/heads/master | 2022-04-24T14:46:49.325746 | 2020-04-27T06:55:31 | 2020-04-27T06:55:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,044 | py | # Outputting a string
print('Hello World!')
# Assigning a value to a variable
my_variable = 5
# Defining a class with an instance method
class MyClass:
    """Minimal demo class exposing a single instance method."""
    def my_method(self):
        # Report that the method was called.
        return "my_method was invoked"
# Instantiating an object from a class
my_object = MyClass()
# Checking what class an object is an instance of
print(isinstance(my_object, MyClass)) # => True
print(isinstance('Hello, World', str)) # => True
# Invoking a method on an object (return value is discarded here)
my_object.my_method() # => "my_method was invoked"
# Creating a list (an array) of mixed-type values
my_list = [5, 'foobar', 3.14, True, False, None]
# Appending a value to the end of a list
my_list.append('bla')
# Get the length/size of the list (bare expression; value is discarded)
len(my_list) # => 7
# Accessing a value by zero-based index
my_list[1] # => 'foobar'
# Iterating over a list (a typical loop)
for value in my_list:
    print(value)
# Create a dictionary with key-value pairs
my_dict = {
    'name': 'Peter',
    'age': 36
}
# Reading a value from a dict by key
print(my_dict['name'])
# Writing a value to a dict (overwrites the existing key)
my_dict['name'] = 'Mauritz'
print(my_dict['name'])
"peter@marklunds.com"
] | peter@marklunds.com |
c1d52d8204145e5e856289646ba9955ee813ccc5 | 74db2f30b62f6524c716fc3bc301b4177ec96876 | /sort_test.py | aff7c597724332696f79a63ef51a75ea8f8ea50a | [] | no_license | Michael-py/coding_challenges | a2e8015f2e936b2e356b8ff0f3e089fbff549140 | fe5b343552fc510ef2eb4511f295767a64f455a2 | refs/heads/main | 2023-05-03T17:58:06.087414 | 2021-05-17T09:29:54 | 2021-05-17T09:29:54 | 368,127,866 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 563 | py |
# function takes two lists as arguments
def sort_and_test(list1, list2):
# check if the 2 lists are of the same length
# return False, if not
if len(list1) != len(list2):
return False
else:
# sort the first list ans store in a variable lst
lst = sorted(list1)
# iterate over the first list
for i in lst:
# check if each number in lst occur in list2
# return True if it occurs
# Else return False
if i in list2:
return True
else:
return False
# Demo input: x holds 10..5 descending, y holds 5..10 ascending
x = [10,9,8,7,6,5]
y = list(range(5,11))
# Same members in a different order -> expected output: True
print(sort_and_test(x, y))
| [
"dairobidemichael@gmail.com"
] | dairobidemichael@gmail.com |
1a6bfbbed305ea623e2da442fa25a000b9f34077 | 53568d7c9ca6d53f3f90fe45d33cf6357a732a88 | /170521-lambda-expresions,list-comprehension,classes/ulamek.py | 4b3d6cf7509a99713ff711da7a639b031f54f698 | [] | no_license | majsylw/Python-3.x-examples | eb7ce7df9c582f7b56fa6d40db5f96479858f867 | 59b56ca98a0ea27ce48fb47a173333bf0a9d1349 | refs/heads/main | 2023-06-08T07:24:53.052672 | 2021-06-29T12:46:15 | 2021-06-29T12:46:15 | 348,288,268 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,596 | py | """
Przykładowa definicja klasy ułamek -- wykorzystanie metod specjalnych
"""
import math
class Ulamek:
    """An always-reduced fraction with a strictly positive denominator.

    The sign lives in the numerator; every constructed instance is
    immediately reduced, so field-wise comparison is a valid equality.
    """

    def __init__(self, licznik, mianownik):
        # denominator must stay positive
        assert(mianownik > 0)
        self.licznik, self.mianownik = licznik, mianownik
        self.skracanie()

    def __str__(self):
        # rendered as "numerator/denominator" by print()
        return f'{self.licznik}/{self.mianownik}'

    def skracanie(self):
        # reduce in place by the greatest common divisor
        wspolny = math.gcd(self.licznik, self.mianownik)
        self.licznik //= wspolny
        self.mianownik //= wspolny

    def __eq__(self, inny):
        # both operands are kept reduced, so field equality suffices
        return (self.licznik, self.mianownik) == (inny.licznik, inny.mianownik)

    def __add__(self, inny):
        # + delegates to the static helper below
        return Ulamek.dodawanie(self, inny)

    def __mul__(self, inny):
        # the Ulamek constructor reduces the product automatically
        return Ulamek(self.licznik * inny.licznik,
                      self.mianownik * inny.mianownik)

    @staticmethod
    def dodawanie(ulamek1, ulamek2):
        """Add two fractions and return the reduced sum."""
        suma = Ulamek(ulamek1.licznik * ulamek2.mianownik
                      + ulamek2.licznik * ulamek1.mianownik,
                      ulamek1.mianownik * ulamek2.mianownik)
        suma.skracanie()
        return suma
if __name__ == '__main__':
    # Quick demo of the Ulamek (fraction) class.
    u1 = Ulamek(3, 4)
    u2 = Ulamek(2, 6)
    print(u1)
    print(u1, '+', u2, '=', Ulamek.dodawanie(u1, u2)) # using the static method
    print(u1, '+', u2, '=', u1 + u2) # overloaded +
    print(u1, '*', u2, '=', u1 * u2) # overloaded *
    print(u1, '==', u2, '->', u1 == u2)
"58003896+majsylw@users.noreply.github.com"
] | 58003896+majsylw@users.noreply.github.com |
a3e2d4f661351a8675bdcd8cb45fa8d33b3ccc33 | 8aff1a81d374d9f6b50cce2d953c8be44a4168d1 | /app.py | 53a7e99dd9d826efad96b730552f11026b3b991f | [] | no_license | itsromack/finsys | 219b225fdb82817a74cbe60e80a058b637d8617a | 53e50790fab5a90b8173e37d08240ec3410666d5 | refs/heads/master | 2020-12-03T05:09:10.754404 | 2017-09-01T02:04:43 | 2017-09-01T02:04:43 | 95,738,897 | 0 | 0 | null | 2017-07-10T23:49:53 | 2017-06-29T04:36:23 | JavaScript | UTF-8 | Python | false | false | 233 | py | from flask import Flask, jsonify
import random
app = Flask(__name__)
@app.route('/api/test', methods=['GET'])
def test():
data = [1, 2, 3, 4, 5, 6, 7]
return jsonify(data)
if __name__ == "__main__":
app.run(debug=True) | [
"romacknatividad@gmail.com"
] | romacknatividad@gmail.com |
3da29da5d1370774c615d9cafb36896069e60977 | 7bfe4bb8851f8241bb4d85327f54445df171cb3b | /bfdict.py | c029441a1fdb8aebf13e29a4340da53fc0ef1c41 | [] | no_license | technophage/bfdict | fc5dbf355e0a80a8c63301816619bc749b3f64ed | 7a17daef88fc513e025454316e43874cc188a5f8 | refs/heads/master | 2021-01-21T19:45:30.446313 | 2018-08-22T14:40:14 | 2018-08-22T14:40:14 | 92,157,896 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 18,150 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
banner = """
__ ___ __ __ __
| |--.' _|.--| |__|.----.| |_
| _ | _|| _ | || __|| _|
|_____|__| |_____|__||____||____|
[.py]
bruteforce dictonary generator
sin@technophage.net
"""
import sys
try:
from optparse import OptionParser
except:
print('[*][bfdict] module load error')
print('[*][bfdict] OptionParser is required for command line processing')
try:
import cPickle as pickle
import os
except:
print('[*][bfdict] module load error')
print('[*][bfdict] resume requires cPickle to be installed.')
print('[*][bfdict] pip install cPickle')
class bfdict(object):
    '''
    Brute-force dictionary (word-list) generator.

    Mandatory parameters (set these before generating):
        .mnlen  int  minimum/starting word length; validated mnlen >= 1
        .mxlen  int  maximum word length; validated mxlen >= mnlen

    At least one character source must be enabled:
        .uselower / .useupper / .usenumber / .usesymbol  bool  enable the
            predefined character sets below, OR
        .usecustom  bool  with .customdict set to the characters to use
            (a custom set overrides the predefined-set flags).

    Optional:
        .prepend / .append  str  static text added around every word
        .outputfile         str  write words to this file instead of stdout

    Callable methods:
        .interactivesetup()   prompt for every option interactively
        .nextword()           return the next word in sequence, advancing
                              internal counters; returns None when the
                              sequence is exhausted
        .savestate(filename)  pickle this object so a run can be resumed;
                              typically called from a KeyboardInterrupt
                              handler, e.g.
                                  if obj.resumesave: obj.savestate(fname)
                              defaults to '.bfdict' in the working directory
        .loadstate(filename)  restore a previous run from file (default
                              '.bfdict'); also sets resumesave = True, and
                              can be auto-triggered with os.path.isfile()
        .dumpdict()           generate every word to file or stdout
    '''
    # class vars
    #
    # NOTE(review): these are *class-level* attributes; the mutable ones
    # (customdict, ci, cl) are shared across instances until shadowed by
    # an instance assignment -- confirm single-instance use is intended.
    #
    # predefined char sets
    lower = [ 'a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z']
    upper = [ 'A','B','C','D','E','F','G','H','I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']
    number = [ '0','1','2','3','4','5','6','7','8','9']
    symbol = [ ',','.',';',':','@','#','~','[','{',']','}','!','"',"'",'$','%','^','&','*','(',')','-','_','=','+','|','?',' ']
    # user defined char sets / strings
    customdict = []
    prepend = ""
    append = ""
    # use flags
    uselower = False
    useupper = False
    usenumber = False
    usesymbol = False
    usecustom = False
    outputfile = ''
    # working vars: cl is the 1-indexed character list (cl[0] = char count),
    # ci holds one 1-based digit per word position (0 = position unused)
    ci = []
    cl = []
    mnlen = 0
    mxlen = 0
    clen = 0
    issetup = False
    resumeload = False
    resumesave = False
    # class functions
    #
    def savestate(self, filename='.bfdict'):
        """Pickle this object to `filename` so the run can be resumed.

        Returns True on success, False on any failure (errors are printed,
        never raised).
        """
        try:
            fh = open(filename, 'wb')
            pickle.dump(self, fh)
            fh.close()
            return True
        except IOError:
            print('[*][bfdict.savestate] file IOError, can\'t open {} for writing'.format(str(filename)))
            return False
        except:
            # catch-all so a failed save never kills the generation loop
            print('[*][bfdict.savestate] error dumping restore data')
            return False
    def loadstate(self, filename='.bfdict'):
        """Restore generator state from a previous savestate() pickle.

        Copies the working counters and string options from the unpickled
        object onto self, marks the object ready (issetup) and enables
        save-on-quit.  Any failure prints a message and exits the process.
        """
        if os.path.isfile(filename):
            try:
                # read the object out of the file
                fh = open(filename, 'rb')
                bftmp = pickle.load(fh)
                fh.close()
                # lists
                self.ci = bftmp.ci
                self.cl = bftmp.cl
                # int content
                self.mnlen = bftmp.mnlen
                self.mxlen = bftmp.mxlen
                self.clen = bftmp.clen
                # str content
                self.prepend = bftmp.prepend
                self.append = bftmp.append
                self.outputfile = bftmp.outputfile
                # set a ready
                self.issetup = True
                # since we loaded from a file, assume we want to save on quit too
                self.resumeload = True
                self.resumesave = True
                # return
                return True
            except IOError:
                print('[*][bfdict.loadstate] file IOError, can\'t open {} for reading'.format(str(filename)))
                exit(0)
            except:
                print('[*][bfdict.loadstate] error loading restore data')
                exit(0)
        else:
            print('[*][bfdict.loadstate] expected resume file {} not found'.format(str(filename)))
            exit(0)
def setdict(self):
if self.resumeload:
# try to load resume data
self.loadstate()
return
self.ci = [0]
self.cl = [0]
charSets = 0
inchrs = []
outchrs = []
cnum = 0
# verify self.mnlen and self.mxlen make sense
if self.mnlen <= 0:
print('[*][bfdict.setdict] Minimum word length MUST be larger than 0')
print('[*][bfdict.setdict] self.mnlen == {}'.format(str(self.mnlen)))
exit(0)
if self.mnlen > self.mxlen:
print('[*][bfdict.setdict] Minimum word length is larger than maximum word length')
print('[*][bfdict.setdict] self.mnlen == {}, self.mxlen == {}'.format(str(self.mnlen), str(self.mxlen)))
exit(0)
# set current length
self.clen = self.mnlen
# init ci array
for x in range(0, self.mxlen):
self.ci.append(0)
for x in range(0, self.mnlen):
self.ci[x] = 1
# add characters
if self.uselower:
charSets += 1
for x in self.lower:
self.cl.append(x)
self.cl[0] += 1
if self.useupper:
charSets += 1
for x in self.upper:
self.cl.append(x)
self.cl[0] += 1
if self.usenumber:
charSets += 1
for x in self.number:
self.cl.append(x)
self.cl[0] += 1
if self.usesymbol:
charSets += 1
for x in self.symbol:
self.cl.append(x)
self.cl[0] += 1
if self.usecustom and (self.customdict != None):
charSets += 1
for x in self.customdict:
self.cl.append(str(x))
self.cl[0] += 1
if charSets <= 0:
print '[*][bfdict.setdict] No characters selected'
exit(0)
# if we got this far mark as ready to go
self.issetup = True
#
#
    def interactivesetup(self):
        """Prompt on stdin for every generator option (Python 2 only:
        uses raw_input and print statements).

        Resets all flags first, then asks for word lengths, character
        sources (custom set OR the predefined sets), prepend/append
        strings, and an optional output filename.  Bad numeric input just
        re-prompts; any read failure on a y/n question leaves that flag
        at its default of False.
        """
        # null any set values
        self.uselower = False
        self.useupper = False
        self.usenumber = False
        self.usesymbol = False
        self.usecustom = False
        self.mnlen = 0
        self.mxlen = 0
        self.customdict = []
        # word lengths
        # min length: loop until a positive integer is entered
        while self.mnlen <= 0:
            try:
                self.mnlen = int(raw_input('[+] enter minimum word length : '))
            except:
                self.mnlen = 0
            if self.mnlen <= 0:
                print '\n[*] please enter a value >= 1\n'
        # max length: loop until a value >= mnlen is entered
        while self.mxlen < self.mnlen:
            try:
                self.mxlen = int(raw_input('[+] enter maximum word length : '))
            except:
                self.mxlen = 0
            if self.mxlen < self.mnlen:
                print '\n[*] please enter a value >= ' + str(self.mnlen) + '\n'
        # character sets
        # custom set takes precedence over the predefined sets below
        try:
            resp = str(raw_input('[+] use custom character set (y/n) : '))
            if resp[0].lower() == 'y':
                self.usecustom = True
        except:
            pass
        if self.usecustom:
            inputStr = ''
            while not inputStr:
                try:
                    inputStr = str(raw_input('[-] enter characters : '))
                except:
                    pass
            # de-duplicate while preserving first-seen order
            for x in range(0, len(inputStr)):
                if inputStr[x] not in self.customdict:
                    self.customdict.append(inputStr[x])
        else:
            # preset char sets
            # lowercase chars
            try:
                resp = str(raw_input('[+] use lowercase characters (y/n) : '))
                if resp[0].lower() == 'y':
                    self.uselower = True
                else:
                    self.uselower = False
            except:
                pass
            # uppercase chars
            try:
                resp = str(raw_input('[+] use uppercase characters (y/n) : '))
                if resp[0].lower() == 'y':
                    self.useupper = True
                else:
                    self.useupper = False
            except:
                pass
            # number chars
            try:
                resp = str(raw_input('[+] use number characters (y/n) : '))
                if resp[0].lower() == 'y':
                    self.usenumber = True
                else:
                    self.usenumber = False
            except:
                pass
            # symbol chars
            try:
                resp = str(raw_input('[+] use standard symbol characters (y/n) : '))
                if resp[0].lower() == 'y':
                    self.usesymbol = True
                else:
                    self.usesymbol = False
            except:
                pass
        # prepend: optional static prefix for every generated word
        try:
            resp = str(raw_input('[+] prepend string to word (y/n) : '))
            if resp[0].lower() == 'y':
                self.prepend = str(raw_input('[+] enter string : '))
        except:
            pass
        # append: optional static suffix for every generated word
        try:
            resp = str(raw_input('[+] append string to word (y/n) : '))
            if resp[0].lower() == 'y':
                self.append = str(raw_input('[+] enter string : '))
        except:
            pass
        # fileoutput: empty string means print to screen instead
        try:
            resp = str(raw_input('[+] output to file (y/n) : '))
            if resp[0].lower() == 'y':
                self.outputfile = str(raw_input('[+] enter filename : '))
        except:
            pass
#
#
    def dumpdict(self):
        """Generate every word and write it to self.outputfile (appending
        when resuming) or print it to stdout when no filename is set.

        Ctrl-C is caught: the word count is reported and, when resumesave
        is set, the current state is pickled for a later resume.  Python 2
        only (bare `print wrd` statement below).
        """
        try:
            fo=False
            wc = 0
            # if a filename is set, assume were outputting to file
            if len(self.outputfile) > 0:
                # append on resume so earlier output is preserved
                if self.resumeload:
                    mode = 'a'
                else:
                    mode = 'w'
                try:
                    f = open(self.outputfile, mode)
                    fo=True
                except IOError:
                    print('[*][bfdict.dumpdict] error writing to file {}'.format(self.outputfile))
                    exit(0)
            # write to file, else print to screen
            wrd = self.nextword()
            while wrd:
                if fo:
                    f.write(wrd + '\n')
                else:
                    print wrd
                wc += 1
                wrd = self.nextword()
            # close file handler
            if fo:
                f.close()
                fo = False
        except KeyboardInterrupt:
            # CTRL-C handler: report progress and optionally save state
            print('\n\n')
            print('[-][bfdict] Caught keyboard interrupt.')
            print('[-][bfdict] Quitting after {} words.'.format(str(wc)))
            if self.resumesave:
                self.savestate()
            return
        except Exception as e:
            print('[*][bfdict.dumpdict] Unexpected error!')
            exit(0)
#
#
def nextword(self):
# if setup flag not set, run setup function
if not self.issetup:
self.setdict()
# generate word
if self.clen <= self.mxlen:
word = ''
for x in range(0, self.clen):
word = self.cl[self.ci[x]] + word
if self.prepend:
word = self.prepend + word
if self.append:
word = word + self.append
self.ci[0] += 1
if self.ci[0] > self.cl[0]:
for x in range(0, self.mxlen):
if self.ci[x] > self.cl[0]:
self.ci[x] = 1
self.ci[x+1] += 1
if (x+1) == self.clen:
self.clen += 1
return word
else:
return
#
#
def main():
custdict = ""
bf = bfdict()
parser = OptionParser()
parser.add_option("-i", action="store_true", dest="inter", help="Interactive setup mode [Use alone]", default=False)
parser.add_option("-m", action="store", type="int", dest="mnlen", help="Minimum word length", default=1)
parser.add_option("-x", action="store", type="int", dest="mxlen", help="Maximum word length", default=3)
parser.add_option("-l", action="store_true", dest="uselower", help="Use lowercase characters", default=False)
parser.add_option("-u", action="store_true", dest="useupper", help="Use uppercase characters", default=False)
parser.add_option("-n", action="store_true", dest="usenumber", help="Use number characters", default=False)
parser.add_option("-s", action="store_true", dest="usesymbol", help="Use standard symbols", default=False)
parser.add_option("-p", action="store", type="string", dest="prepend", help="String to prepend to generated word", default="")
parser.add_option("-a", action="store", type="string", dest="append", help="String to append to generated word", default="")
parser.add_option("-c", action="store", type="string", dest="custdict", help="Set custom character set", default='')
parser.add_option("-f", action="store", type="string", dest="outputfile", help="Output filename [Default is to screen]", metavar="FILE", default='')
parser.add_option("-R", action="store_true", dest="resumeload", help="Load from resume file", default=False)
parser.add_option("-S", action="store_true", dest="resumesave", help="Save resume data on quit", default=False)
(options, args) = parser.parse_args()
custdict = options.custdict
if options.resumeload:
if bf.loadstate():
bf.dumpdict()
else:
if options.resumesave:
bf.resumesave = True
# process options
if options.inter:
bf.interactivesetup()
elif options.custdict:
bf.mnlen = options.mnlen
bf.mxlen = options.mxlen
bf.uselower = False
bf.useupper = False
bf.usenumber = False
bf.usesymbol = False
bf.usecustom = True
if options.prepend:
bf.prepend = options.prepend
if options.append:
bf.append = options.append
for x in range(0, len(custdict)):
bf.customdict.append(custdict[x])
if options.outputfile:
bf.outputfile = options.outputfile
else:
bf.mnlen = options.mnlen
bf.mxlen = options.mxlen
bf.uselower = options.uselower
bf.useupper = options.useupper
bf.usenumber = options.usenumber
bf.usesymbol = options.usesymbol
if options.prepend:
bf.prepend = options.prepend
if options.append:
bf.append = options.append
if options.outputfile:
bf.outputfile = options.outputfile
if (len(sys.argv)>1):
bf.dumpdict()
else:
print banner
parser.print_help()
if __name__ == '__main__':
main()
# @=X
| [
"g0r@technophage.net"
] | g0r@technophage.net |
1edbe9210cdaf8b6747c0577918cd4156ca3452d | 57ddfddd1e11db649536a8ed6e19bf5312d82d71 | /AtCoder/ABC1/ABC184/D.py | b4c97e61d1c7838d65bbca5688a51931bd044ccf | [] | no_license | pgDora56/ProgrammingContest | f9e7f4bb77714dc5088c2287e641c0aa760d0f04 | fdf1ac5d1ad655c73208d98712110a3896b1683d | refs/heads/master | 2023-08-11T12:10:40.750151 | 2021-09-23T11:13:27 | 2021-09-23T11:13:27 | 139,927,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 841 | py | import sys
sys.setrecursionlimit(10**9)
memo = {}
def search(a,b,c,cnt):
tot = a+b+c
if a > b:
a, b = b, a
if b > c:
b, c = c, b
if a > b:
a, b = b, a
if a in memo:
if b in memo[a]:
if c in memo[a][b]:
return memo[a][b][c]
else:
memo[a][b] = {}
else:
memo[a] = {}
memo[a][b] = {}
chil = 0
if a==99:
chil += (cnt+1) * 99
elif a!=0:
chil += search(a+1,b,c,cnt+1) * a
if b==99:
chil += (cnt+1) * 99
elif b!=0:
chil += search(a,b+1,c,cnt+1) * b
if c==99:
chil += (cnt+1) * 99
elif c!=0:
chil += search(a,b,c+1,cnt+1) * c
res = chil / tot
memo[a][b][c] = res
return chil / tot
a, b, c = map(int, input().split())
print(search(a,b,c,0))
| [
"doradora.prog@gmail.com"
] | doradora.prog@gmail.com |
7859cf3fcda5fbb28d69823278adbde60eb165fa | 1a75eadbb072dfc105fa88ee3b7eef6211d697f9 | /smartmirror.py | f78a274f220e9f5572318afd4ca7296d7f617913 | [
"MIT"
] | permissive | ngattlen/SmartMirror | 78c476a41623197c7796d3a4c1c4e49bcef26848 | 04c394bd13de115beb4fefbd3b7471709057eec6 | refs/heads/master | 2020-04-04T08:17:54.828780 | 2018-11-21T12:02:17 | 2018-11-21T12:02:17 | 155,777,381 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,934 | py | #!/usr/bin/python3
from __future__ import print_function
from Tkinter import *
from googleapiclient.discovery import build
from httplib2 import Http
from oauth2client import file, client, tools
from PIL import Image, ImageTk
import datetime
import json
import locale
import time
import locale
import threading
import feedparser
import traceback
import requests
import dateutil.parser
from contextlib import contextmanager
ui_locale = 'de_CH.UTF-8'
news_country = 'CH'
time_format = 12
date_format = "%b %d, %Y"
weather_api_token = '33e763a812916aecbb004eb5fd263ed2'
weather_lang = 'de'
weather_unit = 'auto'
latitude = 47.3666700
longitude = 8.5500000
xlarge_text_size = 94
large_text_size = 48
medium_text_size = 28
small_text_size = 18
LOCALE_LOCK = threading.Lock()
# These pictures are used to display the weather on the mirror
icon_lookup = {
'clear-day': "pics/Sun.png", # clear sky day
'wind': "pics/Wind.png", #wind
'cloudy': "pics/Cloud.png", # cloudy day
'partly-cloudy-day': "pics/PartlySunny.png", # partly cloudy day
'rain': "pics/Rain.png", # rain day
'snow': "pics/Snow.png", # snow day
'snow-thin': "pics/Snow.png", # sleet day
'fog': "pics/Haze.png", # fog day
'clear-night': "pics/Moon.png", # clear sky night
'partly-cloudy-night': "pics/PartlyMoon.png", # scattered clouds night
'thunderstorm': "pics/Storm.png", # thunderstorm
'tornado': "pics/Tornado.png", # tornado
'hail': "pics/Hail.png" # hail
}
@contextmanager
def setlocale(name): # thread proof function to work with locale
with LOCALE_LOCK:
saved = locale.setlocale(locale.LC_ALL)
try:
yield locale.setlocale(locale.LC_ALL, name)
finally:
locale.setlocale(locale.LC_ALL, saved)
class Calendar(Frame):
"""
This Class is used to log on to Google and get the events from Google calendar.
"""
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
self.title = 'Calendar Events'
self.calendarLabel = Label(self, text=self.title, font=('Arial', medium_text_size), fg='white', bg='black')
self.calendarLabel.pack(side=TOP, anchor=E)
self.calenderEventContainer = Frame(self, bg='black')
self.calenderEventContainer.pack(side=TOP, anchor=E)
self.get_event()
def get_event(self):
# Authentication for Google Account
save_output = list()
SCOPES = 'https://www.googleapis.com/auth/calendar.readonly'
store = file.Storage('token.json')
creds = store.get()
if not creds or creds.invalid:
flow = client.flow_from_clientsecrets('credentials.json', SCOPES)
creds = tools.run_flow(flow, store)
service = build('calendar', 'v3', http=creds.authorize(Http()))
# Call the Calendar API
now = datetime.datetime.utcnow().isoformat() + 'Z' # 'Z' indicates UTC time
print('Getting the upcoming 10 events')
events_result = service.events().list(calendarId='primary', timeMin=now,
maxResults=10, singleEvents=True,
orderBy='startTime').execute()
events = events_result.get('items', [])
# Changes time format
for event in events:
start = event['start'].get('dateTime')
cut_time = start[:19]
save_time = datetime.datetime.strptime(cut_time, '%Y-%m-%dT%H:%M:%S') # Converts string into a date object
new_time = datetime.datetime.strftime(save_time, '%d %b %H:%M %Y') # Converts object into a string
event_of_name = event['summary']
output_event = event_of_name + ' ' + new_time
save_output.append(output_event)
for widget in self.calenderEventContainer.winfo_children():
widget.destroy()
for show_events in save_output:
calender_event = Event(self.calenderEventContainer, event_name=show_events)
calender_event.pack(side=TOP, anchor=E)
self.after(60000, self.get_event)
class Event(Frame):
"""
This Class displays the appointments on the mirror
"""
def __init__(self, parent, event_name=None):
Frame.__init__(self, parent, bg='black')
self.eventName = event_name
self.eventNameLabel = Label(self, text=self.eventName, font=('Helvetica', small_text_size), fg="white", bg="black")
self.eventNameLabel.pack(side=TOP, anchor=E)
class News(Frame):
"""
This class gets all the news information on the main page from 20 Minutes and displays it on the mirror.
"""
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, *args, **kwargs)
self.config(bg='black')
self.title = 'News'
self.newsLabel = Label(self, text=self.title, font=('Arial', medium_text_size), fg='white', bg='black')
self.newsLabel.pack(side=TOP, anchor=W)
self.headlinecontainer = Frame(self, bg='black')
self.headlinecontainer.pack(side=TOP)
self.get_headline()
# Gets news from 20 Minuten and using the feedparser module, parses the title of the news that we need.
def get_headline(self):
for widget in self.headlinecontainer.winfo_children():
widget.destroy()
url_headline = 'https://api.20min.ch/rss/view/1'
feed = feedparser.parse(url_headline)
for post in feed.entries[0:5]:
headlines = NewsHeadLines(self.headlinecontainer, post.title)
headlines.pack(side=TOP, anchor=W)
self.after(600000, self.get_headline)
class NewsHeadLines(Frame):
"""
This Class is used to display a picture next to the news
"""
def __init__(self, parent, event_name=None):
Frame.__init__(self, parent, bg='black')
image = Image.open('pics/rss.png')
image = image.resize((25, 25), Image.ANTIALIAS)
image = image.convert('RGB')
photo = ImageTk.PhotoImage(image)
self.picLabel = Label(self, bg='black', image=photo)
self.picLabel.image = photo
self.picLabel.pack(side=LEFT, anchor=N)
self.eventName = event_name
self.eventNameLabel = Label(self, text=self.eventName, font=('Arial', small_text_size), fg='white', bg='black')
self.eventNameLabel.pack(side=LEFT, anchor=N)
class Weather(Frame):
"""
This class gets weather data from DarkSkyNet using the REST API Interface and displays it on the mirror
"""
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
self.temp = ''
self.forecast = ''
self.location = ''
self.now = ''
self.icon = ''
self.degreeFrame = Frame(self, bg='black')
self.degreeFrame.pack(side=TOP, anchor=W)
self.tempLabel = Label(self.degreeFrame, font=('Arial', xlarge_text_size), fg='white', bg='black')
self.tempLabel.pack(side=LEFT, anchor=N)
self.iconLabel = Label(self.degreeFrame, bg='black')
self.iconLabel.pack(side=LEFT, anchor=N, padx=20)
self.nowLabel = Label(self, font=('Arial', medium_text_size), fg='white', bg='black')
self.nowLabel.pack(side=TOP, anchor=W)
self.forecastLabel = Label(self, font=('Arial', small_text_size), fg="white", bg="black")
self.forecastLabel.pack(side=TOP, anchor=W)
self.locationLabel = Label(self, font=('Arial', small_text_size), fg="white", bg="black")
self.locationLabel.pack(side=TOP, anchor=W)
self.get_weatherinfo()
# Gets weather infromation and uses the correct weather picture to display on the mirror.
def get_weatherinfo(self):
location_two = ''
req_weather = 'https://api.darksky.net/forecast/%s/%s,%s?lang=%s&units=%s' % (weather_api_token, latitude,
longitude, weather_lang,
weather_unit)
r = requests.get(req_weather)
weather_object = json.loads(r.text)
degree_sign = u'\N{DEGREE SIGN}'
temp_two = "%s%s" % (str(int(weather_object['currently']['temperature'])), degree_sign)
now_two = weather_object['currently']['summary']
forecast_two = weather_object["hourly"]["summary"]
icon_id = weather_object['currently']['icon']
icon2 = None
if icon_id in icon_lookup:
icon2 = icon_lookup[icon_id]
if icon2 is not None:
if self.icon != icon2:
self.icon = icon2
image = Image.open(icon2)
image = image.resize((100, 100), Image.ANTIALIAS)
image = image.convert('RGB')
photo = ImageTk.PhotoImage(image)
self.iconLabel.config(image=photo)
self.iconLabel.image = photo
else:
self.iconLabel.config(image='')
if self.now != now_two:
self.now = now_two
self.nowLabel.config(text=now_two)
if self.forecast != forecast_two:
self.forecast = forecast_two
self.forecastLabel.config(text=forecast_two)
if self.temp != temp_two:
self.temp = temp_two
self.tempLabel.config(text=temp_two)
if self.location != location_two:
if location_two == ", ":
self.location = "Cannot Pinpoint Location"
self.locationLabel.config(text="Cannot Pinpoint Location")
else:
self.location = location_two
self.locationLabel.config(text=location_two)
self.after(600000, self.get_weatherinfo)
class Time(Frame):
"""
This Class displays the local time on the mirror
"""
def __init__(self, parent, *args, **kwargs):
Frame.__init__(self, parent, bg='black')
#Time Label
self.time = ''
self.timeLabel = Label(self, font=('Arial', large_text_size), fg='white', bg='black')
self.timeLabel.pack(side=TOP, anchor=E)
#Week Label
self.day = ''
self.dayLabel = Label(self, text=self.day, font=('Arial', small_text_size), fg='white', bg='black')
self.dayLabel.pack(side=TOP, anchor=E)
#Date
self.date = ''
self.dateLabel = Label(self, text=self.date, font=('Arial', small_text_size), fg='white', bg='black')
self.dateLabel.pack(side=TOP, anchor=E)
self.exec_time()
# Gets local time from the system
def exec_time(self):
with setlocale(ui_locale):
if time_format > 12:
update_time = time.strftime('%I:%M %p')
else:
update_time = time.strftime('%H:%M')
show_day = time.strftime('%A')
show_date = time.strftime(date_format)
if update_time != self.time:
self.time = update_time
self.timeLabel.config(text=update_time)
if show_day != self.day:
self.day = show_day
self.dayLabel.config(text=show_day)
if show_date != self.date:
self.date = show_date
self.dateLabel.config(text=show_date)
self.timeLabel.after(200, self.exec_time)
class GUI:
"""
This class is used to display all the information in a window and executes all the above methods to run the
program.
"""
def __init__(self):
self.tk = Tk()
self.tk.configure(background='black')
self.topFrame = Frame(self.tk, background='black')
self.topFrame.pack(side=TOP, fill=BOTH, expand=YES)
self.bottomFrame = Frame(self.tk, background='black')
self.bottomFrame.pack(side=BOTTOM, fill=BOTH, expand=YES)
self.state = False
self.tk.bind('<Return>', self.fullscreen)
self.tk.bind('<Escape>', self.exit_Fullscreen)
#Time
self.time = Time(self.topFrame)
self.time.pack(side=RIGHT, anchor=N, padx=100, pady=60)
#Calendar
self.calender = Calendar(self.bottomFrame)
self.calender.pack(side=RIGHT, anchor=S, padx=100, pady=60)
#RSS
self.news = News(self.bottomFrame)
self.news.pack(side=LEFT, anchor=S, padx=100, pady=60)
#Weather
self.weather = Weather(self.topFrame)
self.weather.pack(side=LEFT, anchor=N, padx=100, pady=60)
def fullscreen(self, event=None):
self.state = not self.state
self.tk.attributes('-fullscreen', self.state)
return 'break'
def exit_Fullscreen(self, event=None):
self.state = False
self.tk.attributes('-fullscreen', False)
return 'break'
def main():
window = GUI()
window.tk.mainloop()
if __name__ == main():
main() | [
"test@smartmirror.home"
] | test@smartmirror.home |
779c6f61dc1e2fe0d373d6caa70760b75d488fb6 | b71cd96e711f45eb4dac82e219fc1eb636b5e468 | /Basic track/week 5/exercise 5.1/exercise 5.1.19.1.py | c83102af9f297000d7375273c26c5acbab097e42 | [] | no_license | ferdivanderspoel/pythonProject | ba87b5377df6f4f64cd7f2556506254afe29d861 | 14aefe3d051ea277cfa7a8ea5b17ca7954f15bf8 | refs/heads/master | 2023-01-09T03:18:25.225504 | 2020-11-12T13:37:19 | 2020-11-12T13:37:19 | 312,285,251 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 197 | py | "Python"[1]
"Strings are sequences of characters."[5]
len("wonderful")
"Mystery"[:4]
"p" in "Pineapple"
"apple" in "Pineapple"
"pear" not in "Pineapple"
"apple" > "pineapple"
"pineapple" < "Peach"
| [
"71446089+ferdivanderspoel@users.noreply.github.com"
] | 71446089+ferdivanderspoel@users.noreply.github.com |
f05f111a93fc7ae5a2d834868e706c5ac14c73a3 | f626480c66c59cea43b5fb1c3ed9a9c41dea7909 | /edX/probability_of_disease_given_positive_test.py | b59f9d5b4a5e9ac928084c57490ec76144ce0567 | [] | no_license | silverjam/Udacity_Data_Science | 5c07b202a7b258d607c09e25dbabf3c360140ccf | 3d985cd3afea3942b238975be844f65759e8184b | refs/heads/master | 2016-09-05T10:19:07.822578 | 2015-10-20T14:19:38 | 2015-10-20T14:19:38 | 39,846,625 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,372 | py | #!/usr/bin/env python2
from __future__ import print_function
from __future__ import division
# Probability of having a positive test
p_positive_test = 99/100.0
p_false_positive = 1 - p_positive_test
# Probability that you actually have the disease
p_have_disease = 1/10000.0
# Number of people in 10000 that will have a false positive
num_false_positives = int(10000 * p_false_positive)
num_false_positives
print('Number of false positives:', num_false_positives)
# For each of the 100 people that have false positives, they have a probability
# of (9999-N)/(10000-N) of not having the disease, for N in 0..99 inclusive.
def not_sick():
p_falsepos_and_no_disease = 1
num_dont_have_disease = 9999
total_population = 10000
for x in range(num_false_positives):
p_falsepos_and_no_disease *= num_dont_have_disease/total_population
num_dont_have_disease -= 1
total_population -=1
return( p_falsepos_and_no_disease )
p_falsepos_and_no_disease = not_sick()
print( p_falsepos_and_no_disease )
p_have_disease_after_positive_test = 1 - p_falsepos_and_no_disease
print( p_have_disease_after_positive_test )
p_have_disease_after_positive_test1 = ((1/10000.) * (99/100.)) / (99/100.)
print( p_have_disease_after_positive_test1 )
p_have_disease_after_positive_test2 = 99/(99+9999.0)
print( p_have_disease_after_positive_test2 )
| [
"x@jason.mobarak.name"
] | x@jason.mobarak.name |
19455d674d53a1c667c0bc4f69be24ca7e02635b | a3820180325e4d5a6558430d7bd05cd5a12ba9d2 | /methods/last_name.py | e96ccba5c59fab54cf47fd924e31f2e10e4e6db6 | [
"MIT"
] | permissive | gtavasoli/JSON-Generator | 11d92508a638109db4174837b1edc1c6f361907b | 03cc27fa204c94d0dc5a00b7e4150b9b7757e1d2 | refs/heads/master | 2020-06-23T14:03:43.579577 | 2020-01-28T00:30:23 | 2020-01-28T00:30:23 | 198,643,807 | 9 | 3 | null | null | null | null | UTF-8 | Python | false | false | 72 | py | from methods import fake
def last_name():
return fake.last_name()
| [
"ghht.com@gmail.com"
] | ghht.com@gmail.com |
d58fda147cc73be47e7b2588dc12e596b4f7aea2 | e4354294c70dd8c1eef139a94ae45297e0d2ef00 | /app.py | 325e92d38b5a53e23d7ee1b2b2201acb85829e9f | [] | no_license | MaxKmet/devops-lab2 | 8e3e38fae0637967f36cdc7750bd4b53f9faf86b | 1ed43c252a61762bd1c516e06e8ce5d7c773fd14 | refs/heads/master | 2023-04-15T08:58:27.148499 | 2021-04-14T07:46:39 | 2021-04-14T07:46:39 | 357,816,452 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,197 | py | from flask import Flask, request, render_template
from cosmos_requests import init_container, add_guest_cosmos, get_guest_list_cosmos, mark_guest_arrived_cosmos
from config import endpoint, key
app = Flask(__name__)
cosmos_container = init_container(endpoint, key)
@app.route("/")
def main_page():
return render_template('main_page.html')
@app.route('/add_guest', methods=['POST'])
def add_guest():
guest_name = request.form['nm']
add_guest_cosmos(cosmos_container, guest_name)
guest_lst = get_guest_list_cosmos(cosmos_container)
return render_template('guest_list.html', guest_list=guest_lst) # change
@app.route('/show_guest_list', methods=['POST'])
def show_guest_list():
guest_lst = get_guest_list_cosmos(cosmos_container)
return render_template('guest_list.html', guest_list=guest_lst) # change
@app.route('/mark_guest_arrived', methods=['POST'])
def mark_guest_arrived():
guest_name = request.form['nm']
mark_guest_arrived_cosmos(cosmos_container, guest_name)
guest_lst = get_guest_list_cosmos(cosmos_container)
return render_template('guest_list.html', guest_list=guest_lst) # change
if __name__ == '__main__':
app.run()
| [
"maxkmet01@gmail.com"
] | maxkmet01@gmail.com |
0c3f702c8a2b2c05f13162a27c55217341ff31eb | 0ba0582516b99138d9917238396227fddb2c603e | /video/settings.py | 3f279d3404a162d9df1d59903043df24286f3841 | [] | no_license | xm6264jz/video-app | 0bb0ec71ff7d2761005db79701d5a49174b727c5 | 11d84b6d0b6572435ab324a0eb48d9b660bfc16d | refs/heads/master | 2023-01-14T14:30:24.021804 | 2020-11-24T19:05:55 | 2020-11-24T19:05:55 | 314,917,282 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,087 | py | """
Django settings for video project.
Generated by 'django-admin startproject' using Django 3.1.3.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '6li==90is2aq5$t_nk^5px!q290kc*^@zp1nqxfu&273!q5clb'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'video_collection'
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'video.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'video.wsgi.application'
# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': BASE_DIR / 'db.sqlite3',
}
}
# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/
STATIC_URL = '/static/'
| [
"ahmed.abdinoor3@gmail.com"
] | ahmed.abdinoor3@gmail.com |
f64139a35c4373ac2f6b69e9c1b8e0b8a2ff93ff | de24f83a5e3768a2638ebcf13cbe717e75740168 | /moodledata/vpl_data/480/usersdata/321/110867/submittedfiles/Av2_Parte2.py | 130a05edcdad51dd3406a9fd3116a763a0ab7756 | [] | no_license | rafaelperazzo/programacao-web | 95643423a35c44613b0f64bed05bd34780fe2436 | 170dd5440afb9ee68a973f3de13a99aa4c735d79 | refs/heads/master | 2021-01-12T14:06:25.773146 | 2017-12-22T16:05:45 | 2017-12-22T16:05:45 | 69,566,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 71 | py | # -*- coding: utf-8 -*-valor
numero= int(input('Insira um número: '))
| [
"rafael.mota@ufca.edu.br"
] | rafael.mota@ufca.edu.br |
c89e66a500f81cc42ba195404712500bdf451605 | 68a020cadb04370d65bc4c7c47b12d4a57f3956e | /tool_dev_3/extip.py | 3554113212b05f975da073718c609b11a7583f77 | [] | no_license | grenoldi/info_security | 37d384cf1a23c042c2d7782d57f560ae4ed308e9 | 212c787d4245a6cf44ffc1c64a21c3719e6a3d25 | refs/heads/master | 2023-02-23T14:57:14.377951 | 2021-01-28T19:41:39 | 2021-01-28T19:41:39 | 333,887,109 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | import re
import json
from urllib.request import urlopen
url = 'https://ipinfo.io/json'
response = urlopen(url)
data = json.load(response)
ip = data['ip']
org = data['org']
city = data['city']
country = data['country']
region = data['region']
print('IP details :\nIP: {4}\nRegion: {1}\nCountry: {2}\nCity: {3}\nOrganization: {0}'.format(org, region, country, city, ip))
| [
"guilherme.renoldi@gmail.com"
] | guilherme.renoldi@gmail.com |
6cbdb1487c6d3378423262ea3ae076dec93232d6 | 7c6b801ff36aa0a82ceb30c98e90091209320c7c | /cloudant121234.py | 36222d26b5123a8e34eafb378d33919373468894 | [] | no_license | SmartPracticeschool/llSPS-INT-2442-Smart-Waste-Management-System-For-Metropolitan-Cities | 5872fc64c1290991bb36b8f7fdc03eceb0025a8f | c6673bf9171b66b08a0c5a5f6643799b0d7fc3e6 | refs/heads/master | 2022-10-20T07:07:52.180598 | 2020-06-09T14:23:00 | 2020-06-09T14:23:00 | 267,571,204 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,459 | py | import time
import sys
import random
import ibmiotf.application
import ibmiotf.device
#Provide your IBM Watson Device Credentials
organization = "q2va6d" # repalce it with organization ID
deviceType = "rsip" #replace it with device type
deviceId = "108" #repalce with device id
authMethod = "token"
authToken = "9110705023"#repalce with token
def myCommandCallback(cmd):
print("Command received: %s" % cmd.data)
if cmd.data['command']=='cover':
print("the bin lid is closed")
elif cmd.data['command'] == 'uncover':
print("the bin lid is open")
try:
deviceOptions = {"org": organization, "type": deviceType, "id": deviceId, "auth-method": authMethod, "auth-token": authToken}
deviceCli = ibmiotf.device.Client(deviceOptions)
#..............................................
except Exception as e:
print("Caught exception connecting device: %s" % str(e))
sys.exit()
deviceCli.connect()
while True:
L = random.randint(0, 100);
F = random.randint(0, 100);
Q = random.randint(0, 100);
W = random.randint(0, 100);
E = random.randint(0, 100);
R = random.randint(0, 100);
T = random.randint(0, 100);
Y = random.randint(0, 100);
lat=17.3984
lon=78.5583
data = {'d':{ 'garbagelevel' : L, 'garbageweight': F,'lat': lat,'lon': lon,'a' : Q, 'b' : W, 'c' : E, 'd' : R,'e' : T, 'f' : Y, 'g' : Y}}
u=time.asctime(time.localtime(time.time()))
print(u)
#print data
def myOnPublishCallback():
print ("Published Your Garbage Level = %s %%" % L, "Garbage Weight = %s %%" % F, "to IBM Watson")
print ("Published Your Garbage Level of bin2 = %s %%" % Q, "Garbage Weight of bin2 = %s %%" % W, "to IBM Watson")
print ("Published Your Garbage Level of bin3 = %s %%" % E, "Garbage Weight of bin3 = %s %%" % R, "to IBM Watson")
print ("Published Your Garbage Level of bin4 = %s %%" % T, "Garbage Weight of bin4 = %s %%" % Y, "to IBM Watson")
success = deviceCli.publishEvent("event", "json", data, qos=0, on_publish=myOnPublishCallback)
if not success:
print("Not connected to IoTF")
time.sleep(5)
deviceCli.commandCallback = myCommandCallback
from cloudant.client import Cloudant
from cloudant.error import CloudantException
from cloudant.result import Result, ResultByKey
client = Cloudant("fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix", "cd3fd31f55919b590bdd100e21c3278805fab74817ca0ca86c68309a46585792",
url="https://fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix:cd3fd31f55919b590bdd100e21c3278805fab74817ca0ca86c68309a46585792@fa3c80de-84b9-4280-be10-e9ee55d6726b-bluemix.cloudantnosqldb.appdomain.cloud")
client.connect()
database_name = "dustmanagement"
my_database = client.create_database(database_name)
if my_database.exists():
print(f"'{database_name}' successfully created.")
json_document = {'d':{ 'Garbage Level' : L, 'Garbage Weight': F }}
json_document = {'d':{ 'Garbage Level' : Q, 'Garbage Weight': W }}
json_document = {'d':{ 'Garbage Level' : E, 'Garbage Weight': R }}
json_document = {'d':{ 'Garbage Level' : T, 'Garbage Weight': Y }}
new_document = my_database.create_document(json_document)
if new_document.exists():
print(f"Document '{new_document}' successfully created.")
''' if L>=100:
print("your garbage is full")
import requests
url = "https://www.fast2sms.com/dev/bulk"
querystring = {"authorization":"G3k8jc6SOWqei20PQZJV4otdarXImlCYAygM9RuUxKnb1BvDhEWbJPYeFM1tLASXNKQzj5xp0Gm3Uw6B","sender_id":"FSTSMS","message":"This is test message","language":"english","route":"p","numbers":"9999999999,8919275560,7777777777"}
headers = {
'cache-control': "no-cache"
}
response = requests.request("GET", url, headers=headers, params=querystring)
print(response.text)'''
# Disconnect the device and application from the cloud
deviceCli.disconnect()
| [
"noreply@github.com"
] | SmartPracticeschool.noreply@github.com |
5a63bc1d2dddb2ed864673adea5c00202e2d59df | dad9463da18cefe7ad8a3c257e624dc2027c7b4d | /day62_ajax_excise/settings.py | a96495d533d567bfbfb422be0283606de7cd1155 | [] | no_license | wang12xishan/day62_ajax_excise | 351bba1acc5b51fc1db7210092ef251a139a367b | 0736f0bd2d5c64ea8130a378c2c4a366824d1d10 | refs/heads/master | 2021-01-23T01:01:11.927305 | 2017-03-22T18:08:42 | 2017-03-22T18:08:42 | 85,860,672 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,241 | py | """
Django settings for day62_ajax_excise project.
Generated by 'django-admin startproject' using Django 1.10.5.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.10/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '#4!j8o#orw9fqm-lhucm4e3(br4)z^9pd4f#y)lta_&u5qz-u4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'app01',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'day62_ajax_excise.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [os.path.join(BASE_DIR, 'templates')]
,
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'day62_ajax_excise.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.10/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
# Password validation
# https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.10/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.10/howto/static-files/
STATIC_URL = '/static/'
STATICFILES_DIRS = (os.path.join(BASE_DIR,"static"),) | [
"wang19860520+cn@gmail.com"
] | wang19860520+cn@gmail.com |
79caad0a3d3552b90e0a7cecd92ee014a071a18a | 3870fc995ae39a4cd289ee837a8d4729b16b3e39 | /cheml/nn/nn_dsgd.py | f51b4910bada35c1f1ad54771d4e0a31d3ff3da9 | [
"BSD-3-Clause"
] | permissive | arpitban/chemml | e195f60761de32b554f0217d995c179429b2293a | 4468d7bbadfc43dbbb7cd2b0302616ceed318281 | refs/heads/master | 2020-03-14T03:51:25.847391 | 2018-03-26T02:26:52 | 2018-03-26T02:26:52 | 131,428,741 | 1 | 0 | null | 2018-04-28T17:07:33 | 2018-04-28T17:07:33 | null | UTF-8 | Python | false | false | 2,555 | py | import numpy as np
from mpi4py import MPI
import warnings
import multiprocessing
import nn_psgd
from ..utils import chunk
def train(X,Y,nneurons,input_act_funcs,validation_size=0.2,learn_rate=0.001,rms_decay=0.9,n_epochs=10000,
    batch_size=256,n_hist=20,n_check=50,threshold=0.1, print_level=1):
    """
    Main distributed memory function

    Parameters
    ----------
    All available parameters for nn_psgd - n_cores
    The number of cores will be directly passed to the mpirun command

    Returns
    -------
    trained_network: a list of dicts with trained weights and the activation functions from
    each node
    """
    # MPI
    comm=MPI.COMM_WORLD
    rank=comm.rank
    size=comm.size
    # Every rank reports its local core count to rank 0, which uses the total
    # to decide how many data chunks to hand out.
    cpu_count = multiprocessing.cpu_count()
    cpu_count = comm.gather(cpu_count,root=0)
    if rank == 0:
        N = len(X)
        n_cores = sum(cpu_count)
        chunk_list= list( chunk(range(N),n_cores) )
        indices =[]
        # Scatter: each rank gets as many chunks as it has cores. Rank 0 keeps
        # its own share; other ranks receive theirs via point-to-point sends
        # (tag 7 = features, tag 77 = targets).
        # NOTE(review): the `indices =[]` just above is immediately shadowed by
        # the reset inside the loop, so it is redundant — confirm and drop.
        for i,c in enumerate(cpu_count):
            indices = []
            for j in range(c):
                indices+=chunk_list.pop()
            if i!=0:
                comm.send(X[indices],dest=i, tag = 7)
                comm.send(Y[indices],dest=i, tag = 77)
            else:
                Xnew = X[indices]
                Ynew = Y[indices]
        X = Xnew
        Y = Ynew
    else:
        # Non-root ranks block here until rank 0 ships their slice of the data.
        X = comm.recv(source=0, tag = 7)
        Y = comm.recv(source=0, tag = 77)
    # Each rank trains an independent network on its local slice using all of
    # its local cores (shared-memory parallel SGD).
    trained_network = nn_psgd.train(X,Y,nneurons=nneurons,
        input_act_funcs=input_act_funcs,learn_rate=learn_rate,rms_decay=rms_decay,
        n_epochs=n_epochs,batch_size=batch_size,n_cores=multiprocessing.cpu_count(),n_hist=n_hist,
        n_check=n_check,threshold=threshold, print_level=print_level)
    # Collect every rank's trained network on rank 0; other ranks return None
    # implicitly (only rank 0 hits the return below).
    trained_network = comm.gather(trained_network,root=0)
    if rank==0:
        return trained_network
def output(X,nnets):
    """(nn_dsgd_output)
    User accessible output for neural network given trained weights.

    Parameters
    ----------
    X: array
        Input features
    nnets: list of dict
        A list of neural networks from each cluster. keys required weights and
        activation functions

    Returns
    -------
    predicted values in array type (rank 0 only; every other MPI rank
    returns None implicitly)
    """
    #MPI
    comm = MPI.COMM_WORLD
    rank = comm.rank
    size = comm.size
    if rank == 0:
        # Evaluate X on each gathered network; one prediction array per node.
        results = []
        for nn in nnets:
            results+= [nn_psgd._output(X,nn['weights'],nn_psgd.act_funcs_from_string(nn['act_funcs'],len(nn['weights'])-1))]
        return results
"mojtabah@buffalo.edu"
] | mojtabah@buffalo.edu |
301253a83544e03770224a2f11d006038a2c8527 | 8116ebf0eae0dddff6ad0a1d71ef0f1b56b874a3 | /csv_writer.py | ba46601fd4767ddc10e3f27f027fd990861b3f09 | [] | no_license | wesleycox-unr/Week6 | da2e24468454f11bdbd9ba7a52c7029dca3a01e6 | 6cacafa7d70cc1d9658cccf63641d26fea3f4afe | refs/heads/master | 2020-08-05T21:42:41.531928 | 2019-10-04T04:04:00 | 2019-10-04T04:04:00 | 212,721,890 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 312 | py | #csv writer
import csv
data = [
["Name","Address","Age"],
["Jane Smith", "123 Fake St", "23"],
["Slim Dusty","564 Cunnamulla Fella St","44"]
]
with open("people_CSV.csv","w") as outfile:
writer = csv.writer(outfile, delimiter=',')
for row in data:
writer.writerow(row) | [
"wesleycox@unr.edu"
] | wesleycox@unr.edu |
ce94432a5578b5b6bffa78325e68f387bae19ee9 | 87daea58b4b1481160eb6a26c58bca0de6533fb8 | /salt/utils/doc.py | 876984c88a0326fe00956ef73a4d001adf0d144d | [
"Apache-2.0",
"BSD-2-Clause",
"MIT"
] | permissive | getlantern/salt | 4640cf1606d4401c1fba21dfd0464142c7b753a2 | 201008b35d799bee5c33e4060241a21e08661def | refs/heads/develop | 2023-08-30T13:23:14.966320 | 2014-05-22T22:48:19 | 2014-05-22T22:48:19 | 15,692,073 | 0 | 2 | NOASSERTION | 2023-09-06T17:29:20 | 2014-01-07T01:39:20 | Python | UTF-8 | Python | false | false | 871 | py | # -*- coding: utf-8 -*-
import re
def strip_rst(docs):
    '''
    Strip/replace reStructuredText directives in docstrings

    ``docs`` maps function names to docstrings. Entries containing RST
    directives are rewritten in place to a plain-text form; the (mutated)
    mapping is also returned for convenience.
    '''
    # .iteritems() was Python-2 only; .items() works on both 2.7 and 3.x.
    for func, docstring in docs.items():
        if not docstring:
            continue
        # Drop literal code-block directives entirely. Dots are escaped so
        # only a real ".. code-block::" marker matches, not arbitrary chars.
        docstring_new = re.sub(r' *\.\. code-block:: \S+\n{1,2}',
                               '', docstring)
        docstring_new = re.sub(r'\.\. note::',
                               'Note:', docstring_new)
        docstring_new = re.sub(r'\.\. warning::',
                               'Warning:', docstring_new)
        docstring_new = re.sub(r'\.\. versionadded::',
                               'New in version', docstring_new)
        docstring_new = re.sub(r'\.\. versionchanged::',
                               'Changed in version', docstring_new)
        # Only touch entries that actually changed.
        if docstring != docstring_new:
            docs[func] = docstring_new
    return docs
| [
"mgwilliams@gmail.com"
] | mgwilliams@gmail.com |
aaf1e8d56ba7f1bd2c23998b5c393c26aeb44201 | 1ae8bacc71b6205f2c722c49ff329f10e5dadf0d | /fn-raster-vector-summary-stats/index.py | c82000842b28a99edd009e601ea1878b09bba2b5 | [
"MIT"
] | permissive | disarm-platform/fn-raster-vector-summary-stats | b23bff62950fdef79cad29275dabebf540e0ba27 | 0937a30d95a842e9b5978ab22528cb42ffe40f9f | refs/heads/master | 2021-07-18T15:50:09.981586 | 2020-06-02T18:58:17 | 2020-06-02T18:58:17 | 170,125,671 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 2,143 | py | import json
import shutil
import sys
from json import JSONDecodeError
from urllib.error import URLError
import config
from function import handler
from function import preprocess_params
def get_params_from_stdin() -> dict:
    """Consume all of stdin and parse it as a single JSON object."""
    # Reading to EOF is equivalent to the readline-until-empty loop: a blank
    # input line is "\n", so only end-of-stream terminates the read.
    raw = sys.stdin.read()
    return json.loads(raw)
def handle_error(error, message='Unknown error, please ask the admins to check container logs for more info'):
    """Log *error* for the container logs and print a JSON error envelope.

    A ValueError's own message is considered safe to surface to the caller;
    any other exception type is replaced by the generic *message*.
    """
    # This goes to the container logs only.
    sys.stderr.write(str(error))
    # Exact-type check on purpose: only a bare ValueError exposes its text.
    detail = str(error) if type(error) is ValueError else str(message)
    print(json.dumps({"function_status": "error",
                      "result": "Error from function: " + detail}))
def handle_success(result):
    """Print the success envelope on stdout.

    *result* must be JSON-dumpable: e.g. a string (possibly base64-encoded)
    or some JSON-like object.
    """
    payload = {"function_status": "success", "result": result}
    print(json.dumps(payload))
# Entry point: read JSON params from stdin, run the handler, and always emit
# exactly one JSON envelope (success or error) on stdout for the caller.
if __name__ == "__main__":
    try:
        # Get and parse params
        params = get_params_from_stdin()
        # Mutate the params to get them ready for use
        preprocess_params.preprocess(params)
        # Run!
        function_response = handler.run_function(params)
        handle_success(function_response)
    except JSONDecodeError as e:
        handle_error(e, "Request received by function is not valid JSON. Please check docs")
    except URLError as e:
        handle_error(e, "Problem downloading files. Please check URLs passed as parameters are "
                        "valid, are live and are publicly accessible.")
    # Bare exceptions are not recommended - see https://www.python.org/dev/peps/pep-0008/#programming-recommendations
    # We're using one to make sure that _any_ errors are packaged and returned to the calling server,
    # not just logged at the function gateway
    except Exception as err:
        handle_error(err, "Unknown error")
    finally:
        # Always remove the scratch directory, whether we succeeded or failed.
        shutil.rmtree(config.TEMP)
| [
"jonathan@peoplesized.com"
] | jonathan@peoplesized.com |
facac2aece7dd61b321a71e3cab804828ac6fbf2 | dda78cbb5ef889831b1e015178cc34775dfa7cee | /dashboard/models.py | d2cf85c8d166b5277359858d9cc9dda0f033b2bb | [] | no_license | LearningData/schoolbag_queue | b2f591094d2ebcf68aaea6b02ea8b3495fe18959 | 5068364f518291eba16f1190eb4a2c1f64e29d86 | refs/heads/master | 2021-03-27T09:26:58.108206 | 2016-04-22T14:55:21 | 2016-04-22T14:55:21 | 54,565,904 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,011 | py | # This is an auto-generated Django model module.
# You'll have to do the following manually to clean this up:
# * Rearrange models' order
# * Make sure each model has one field with primary_key=True
# * Make sure each ForeignKey has `on_delete` set to the desired behavior.
# * Remove `managed = False` lines if you wish to allow Django to create, modify, and delete the table
# Feel free to rename the models, but don't rename db_table values or field names.
from __future__ import unicode_literals
from django.db import models
class DjangoMigrations(models.Model):
    """Unmanaged mirror of Django's own migration bookkeeping table."""
    app = models.CharField(max_length=255)
    name = models.CharField(max_length=255)
    applied = models.DateTimeField()

    class Meta:
        managed = False
        db_table = 'django_migrations'
class Friends(models.Model):
    """Unmanaged mapping of teacher-to-teacher friend requests and their state."""
    teacherfrom = models.IntegerField(db_column='TeacherFrom')  # Field name made lowercase.
    teacherto = models.IntegerField(db_column='TeacherTo')  # Field name made lowercase.
    verified = models.IntegerField(db_column='Verified')  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'friends'
class Links(models.Model):
    """Unmanaged per-school link entries; (schoolid, display) is unique."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    display = models.CharField(max_length=15)
    link = models.TextField()

    class Meta:
        managed = False
        db_table = 'links'
        unique_together = (('schoolid', 'display'),)
class Notes(models.Model):
    """Unmanaged per-student, per-timeslot, per-day note rows."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    studentid = models.IntegerField(db_column='studentID')  # Field name made lowercase.
    timeslotid = models.IntegerField(db_column='timeslotID')  # Field name made lowercase.
    date = models.DateField()
    text = models.CharField(max_length=100)
    status = models.IntegerField()
    classid = models.IntegerField(db_column='classID')  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'notes'
        unique_together = (('schoolid', 'studentid', 'timeslotid', 'date'),)
class Pnotes(models.Model):
    """Unmanaged note rows; identical schema to Notes but stored in 'pnotes'."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    studentid = models.IntegerField(db_column='studentID')  # Field name made lowercase.
    timeslotid = models.IntegerField(db_column='timeslotID')  # Field name made lowercase.
    date = models.DateField()
    text = models.CharField(max_length=100)
    status = models.IntegerField()
    classid = models.IntegerField(db_column='classID')  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'pnotes'
        unique_together = (('schoolid', 'studentid', 'timeslotid', 'date'),)
class Posts(models.Model):
    """Unmanaged forum post rows, each tied to a topic via topicid."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    topicid = models.IntegerField(db_column='topicID')  # Field name made lowercase.
    date = models.DateTimeField()
    text = models.CharField(max_length=255)
    postowner = models.IntegerField(db_column='postOwner')  # Field name made lowercase.
    fileattached = models.CharField(db_column='fileAttached', max_length=100, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'posts'
class Present(models.Model):
    """Unmanaged attendance rows: one per student, date and timeslot."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    studentid = models.IntegerField(db_column='studentID')  # Field name made lowercase.
    date = models.DateField()
    present = models.IntegerField()
    timeslotid = models.CharField(db_column='timeslotID', max_length=4)  # Field name made lowercase.
    notes = models.CharField(max_length=255)

    class Meta:
        managed = False
        db_table = 'present'
class Topics(models.Model):
    """Unmanaged forum topic rows; topicid is the auto-increment primary key."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    topicid = models.AutoField(db_column='topicID', primary_key=True)  # Field name made lowercase.
    date = models.DateTimeField()
    title = models.CharField(max_length=30)
    text = models.CharField(max_length=255)
    topicowner = models.IntegerField(db_column='topicOwner')  # Field name made lowercase.
    fileattached = models.CharField(db_column='fileAttached', max_length=100, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'topics'
class UserPhoto(models.Model):
    """Unmanaged user photo metadata rows (name, size, MIME type, file flag)."""
    name = models.CharField(max_length=100)
    size = models.IntegerField(blank=True, null=True)
    type = models.CharField(max_length=40, blank=True, null=True)
    file = models.CharField(max_length=1)

    class Meta:
        managed = False
        db_table = 'user_photo'
class WorkPointPosts(models.Model):
    """Unmanaged 'work point' post rows; like Posts but with 1024-char text."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    topicid = models.IntegerField(db_column='topicID')  # Field name made lowercase.
    date = models.DateTimeField()
    text = models.CharField(max_length=1024)
    postowner = models.IntegerField(db_column='postOwner')  # Field name made lowercase.
    fileattached = models.CharField(db_column='fileAttached', max_length=100, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'work_point_posts'
class WorkPointTopics(models.Model):
    """Unmanaged 'work point' topic rows, additionally keyed to a class_id."""
    schoolid = models.IntegerField(db_column='schoolID')  # Field name made lowercase.
    topicid = models.AutoField(db_column='topicID', primary_key=True)  # Field name made lowercase.
    class_id = models.IntegerField()
    date = models.DateTimeField()
    title = models.CharField(max_length=30)
    text = models.CharField(max_length=1024)
    topicowner = models.IntegerField(db_column='topicOwner')  # Field name made lowercase.
    fileattached = models.CharField(db_column='fileAttached', max_length=100, blank=True, null=True)  # Field name made lowercase.

    class Meta:
        managed = False
        db_table = 'work_point_topics'
| [
"edgar@learningdata.ie"
] | edgar@learningdata.ie |
87ddeeb6496c681d6d54cffb82c660e6c6709fa8 | b0f8ea322efa4c2a27997ca68d64d07d6fb47e01 | /images/urls.py | 6f978e7f6834f2266d9119d64c6e5911b1fe13d0 | [] | no_license | cgeb/bookmarks | 8c56eb092ac4402a6444924cfc7047cf5a527003 | 497340a12192b76d7a1a287a6573e6cb01e9b976 | refs/heads/main | 2023-07-07T14:57:06.555686 | 2021-08-15T04:58:21 | 2021-08-15T04:58:21 | 396,220,247 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | from django.urls import path
from . import views
app_name = 'images'
urlpatterns = [
path('', views.image_list, name='list'),
path('create/', views.image_create, name='create'),
path('detail/<int:id>/<slug:slug>/', views.image_detail, name="detail"),
path('like/', views.image_like, name='like'),
path('ranking/', views.image_ranking, name='ranking'),
]
| [
"ckg61386@gmail.com"
] | ckg61386@gmail.com |
9a5b1d2e7d6dea3e986d99e0bb25fe5acc6bb443 | 63b0f544dc8ad899dd605d36e6048077c7a9ed6e | /tests/test_shrinking.py | 1c5b0a732701a01bc5dd6b9c42af810e40883b84 | [] | no_license | DRMacIver/structureshrink | c2372d7e4686879cb035292573d32a60459f1024 | 625e01236d6a7d72295782277737595f81d77d2a | refs/heads/master | 2020-05-22T02:47:24.446684 | 2016-06-16T12:16:39 | 2016-06-16T12:16:39 | 55,408,891 | 101 | 6 | null | 2016-04-18T20:24:31 | 2016-04-04T12:20:29 | Python | UTF-8 | Python | false | false | 734 | py | from structureshrink import shrink
from hypothesis import given, strategies as st
import hashlib
@given(st.binary(), st.random_module())
def test_partition_by_length(b, _):
    """Shrinking with len as the classifier must yield one witness per
    achievable length: len(b) + 1 classes (lengths 0 through len(b))."""
    shrunk = shrink(b, len)
    assert len(shrunk) == len(b) + 1
@given(
    st.lists(st.binary(min_size=1, max_size=4), min_size=1, max_size=5),
    st.random_module()
)
def test_shrink_to_any_substring(ls, _):
    """Classifying the joined chunks by how many of them survive as
    substrings must produce at least len(ls) distinct shrunk examples."""
    shrunk = shrink(
        b''.join(ls), lambda x: sum(l in x for l in ls)
    )
    assert len(shrunk) >= len(ls)
def test_partition_by_last_byte():
    """With 128 possible labels (low 7 bits of the last SHA1 digest byte),
    shrinking a seed covering every 2-byte pair must give 128 witnesses."""
    seed = b''.join(bytes([i, j]) for i in range(256) for j in range(256))
    shrunk = shrink(
        seed, lambda s: hashlib.sha1(s).digest()[-1] & 127
    )
    assert len(shrunk) == 128
"david@drmaciver.com"
] | david@drmaciver.com |
468799ae4b8f2389b07d0e398889b2188bbb090e | 56358cacf4a270c0f74993e0b2fd5f9884d2544f | /representasi-embedding-teks/main.py | ee7afb819c1c9cf31f6610e58163575db1f9d817 | [] | no_license | rizalespe/pytorch-stuff | 576dc1ef7c51a401ec128e9465f91d37d48123ee | d64e8977556a11590fb8e95cfc02c9fd6509ea7e | refs/heads/master | 2021-06-24T09:31:28.316171 | 2021-02-22T10:54:26 | 2021-02-22T10:54:26 | 194,181,853 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,392 | py | # AUTHOR: Rizal Setya Perdana (rizalespe@ub.ac.id)
# This code written for showing the process of generating embedding
# representation of text data
import torch
import torch.nn as nn
import torch.nn.utils.rnn as rnn_utils
import csv
import pickle
from helper import Vocabulary, TextPreprocess
"""
Datasource example:
https://github.com/rizalespe/Dataset-Sentimen-Analisis-Bahasa-Indonesia/blob/master/dataset_tweet_sentiment_pilkada_DKI_2017.csv
"""
datasource = 'dataset_tweet_sentiment_pilkada_DKI_2017.csv'
minimum_treshold = 5
with open(datasource) as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
line_count = 0
tweet_collection = []
# Args: list of document, contain_header(True/False)
# Return: text vocabulary with index
for row in csv_reader:
text_tweet = row[3]
tweet_collection.append(text_tweet)
print("Jumlah dokumen tweet dalam list: ", len(tweet_collection))
""" Generating the vocabulary file (index, word) from a csv file and save
only word >= minimum threshold value
"""
Vocabulary().generate(list_document= tweet_collection,
threshold=minimum_treshold,
contain_header=True,
save_to_file='vocab.pkl')
"""Mapping list of document to index based on the vocabulary file
"""
vocabulary_file= 'vocab.pkl'
maps = Vocabulary().map(vocabulary_file=vocabulary_file,
list_document=tweet_collection,
contain_header=True)
with open(vocabulary_file, 'rb') as f:
vocab = pickle.load(f)
vocab_size = len(vocab)
print("Jumlah kata yang ada pada vocabulary: ", vocab_size)
#instantiate embedding layer
embed = nn.Embedding(vocab_size, embedding_dim=10)
print("Ukuran layer embedding: ", embed)
# generate list of document
list_docs = []
for x in maps:
list_docs.append(torch.LongTensor(x))
"""Pad the sequences: proses ini meratakan dokumen yang memiliki panjang
kata berbeda-beda. Setelah melalui proses pad sequence ini, seluruh dokumen
pada corpus akan memiliki panjang yang sama.
"""
list_docs = rnn_utils.pad_sequence(list_docs, batch_first=True)
embedded_doc = embed(list_docs)
print("Output embedding: ", embedded_doc.shape)
| [
"rizalespe@gmail.com"
] | rizalespe@gmail.com |
fcc15469fa8df5260a99b501310bdaf372acfeaa | b6f5e6d1939e82ac8e8414880450b3470c96d8e4 | /landavailability/tests/lr/test_serializers.py | a59433c10afdf3a90c1b71f03e979f00c276f33a | [
"MIT"
] | permissive | uk-gov-mirror/alphagov.land-availability-lr | 01eafea5122d8a05f5b56084af8528742b92c799 | 8d782b80c2f8289133a4ea52006972ced6a9189a | refs/heads/master | 2021-06-29T20:32:47.167923 | 2017-09-07T09:15:54 | 2017-09-07T09:15:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,383 | py | from unittest import TestCase
import pytest
import json
from lr.models import Polygon, Title, Uprn
from lr.serializers import PolygonCreationSerializer, UprnCreationSerializer
class TestPolygonSerializer(TestCase):
    """DB-backed tests for PolygonCreationSerializer."""

    @pytest.mark.django_db
    def test_polygon_creation_serializer_create_object(self):
        """A valid GeoJSON-style payload creates one Polygon and one Title."""
        json_payload = """
        {
            "id": 12345,
            "title": "ABC123",
            "insert": "2004-11-08T00:00:00",
            "update": "2004-11-09T00:00:00",
            "status": "C",
            "geom": {
                "type": "Polygon",
                "coordinates": [
                    [
                        [-0.22341515058230163, 52.93036769987315],
                        [-0.22039561538021543, 52.93215130879717],
                        [-0.21891135174799967, 52.93122765287705],
                        [-0.22193998154995934, 52.92945074233686],
                        [-0.22341515058230163, 52.93036769987315]
                    ]
                ]
            },
            "srid": 27700
        }
        """

        data = json.loads(json_payload)
        serializer = PolygonCreationSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        serializer.save()
        # Saving the polygon also creates its owning Title record.
        self.assertEqual(Polygon.objects.count(), 1)
        self.assertEqual(Title.objects.count(), 1)
class TestUprnSerializer(TestCase):
    """DB-backed tests for UprnCreationSerializer."""

    @pytest.mark.django_db
    def test_uprn_creation_serializer_create_object(self):
        """With the referenced Title pre-created, the payload yields one Uprn."""
        json_payload = """
        {
            "uprn": 12345,
            "title": "ABC123"
        }
        """

        title = Title(id="ABC123")
        title.save()

        data = json.loads(json_payload)
        serializer = UprnCreationSerializer(data=data)
        self.assertTrue(serializer.is_valid())
        serializer.save()
        self.assertEqual(Uprn.objects.count(), 1)
        self.assertEqual(Title.objects.count(), 1)

    @pytest.mark.django_db
    def test_uprn_creation_serializer_create_object_invalid_title(self):
        """Without the referenced Title in the DB, validation must fail."""
        json_payload = """
        {
            "uprn": 12345,
            "title": "ABC123"
        }
        """

        data = json.loads(json_payload)
        serializer = UprnCreationSerializer(data=data)
        self.assertFalse(serializer.is_valid())
| [
"andrea.grandi@digital.cabinet-office.gov.uk"
] | andrea.grandi@digital.cabinet-office.gov.uk |
b22e2138f9c4c2578dd2761ab351bdc609613b66 | 381b75fe68a4da258e2e60a97105b66ac47214e4 | /qa/rpc-tests/getblocktemplate_proposals.py | bd844d49dd91db1fa1eb0f16535ccea2625de16b | [
"MIT"
] | permissive | lipcoin/lipcoin | 3a5997dfc9193ee7dee6f9fa0adc1cb5fb8c92a3 | 7afc0a02d63620e5a5601474cca131cb0cf3bbe4 | refs/heads/master | 2021-01-24T07:57:56.248620 | 2018-03-17T19:04:38 | 2018-03-17T19:04:38 | 112,155,869 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,726 | py | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The LipCoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_framework.test_framework import LipCoinTestFramework
from test_framework.util import *
from binascii import a2b_hex, b2a_hex
from hashlib import sha256
from struct import pack
def b2x(b):
    """Return the lowercase ASCII hex representation of byte string *b*."""
    return bytes(b).hex()
# NOTE: This does not work for signed numbers (set the high bit) or zero (use b'\0')
def encodeUNum(n):
    """Serialize positive integer *n* as a length byte followed by its
    little-endian base-256 digits (final digit kept <= 127)."""
    payload = bytearray()
    while n > 127:
        payload.append(n % 256)
        n //= 256
    payload.append(n)
    return bytes([len(payload)]) + bytes(payload)
def varlenEncode(n):
    """Encode *n* as a Bitcoin CompactSize (variable-length) integer."""
    if n < 0xfd:
        return pack('<B', n)
    # Pick the smallest prefixed width that can hold n.
    for limit, prefix, fmt in ((0xffff, b'\xfd', '<H'),
                               (0xffffffff, b'\xfe', '<L')):
        if n <= limit:
            return prefix + pack(fmt, n)
    return b'\xff' + pack('<Q', n)
def dblsha(b):
    """Return SHA256(SHA256(b)) — Bitcoin's double-SHA256 digest."""
    inner = sha256(b).digest()
    return sha256(inner).digest()
def genmrklroot(leaflist):
    """Compute the merkle root of *leaflist* (a list of 32-byte tx hashes).

    The input is copied before processing: the original implementation
    appended the duplicated last element directly into the caller's list on
    odd-length levels, mutating the argument as a side effect.
    Raises IndexError for an empty leaf list (unchanged behavior).
    """
    cur = list(leaflist)  # defensive copy — never mutate the caller's list
    while len(cur) > 1:
        if len(cur) & 1:
            cur.append(cur[-1])  # odd level: hash the last element with itself
        # Hash adjacent pairs to build the next level up.
        cur = [dblsha(cur[i] + cur[i + 1]) for i in range(0, len(cur), 2)]
    return cur[0]
def template_to_bytearray(tmpl, txlist):
    """Serialize a getblocktemplate dict plus raw transactions into a block:
    80-byte header (version, prev hash, merkle root, time, bits, zero nonce)
    followed by a CompactSize tx count and the raw transactions."""
    blkver = pack('<L', tmpl['version'])
    mrklroot = genmrklroot(list(dblsha(a) for a in txlist))
    timestamp = pack('<L', tmpl['curtime'])
    nonce = b'\0\0\0\0'  # proposals are not mined, so any nonce will do
    # [::-1] reverses the hex-decoded hashes: RPC uses big-endian hex, the
    # wire format is little-endian.
    blk = blkver + a2b_hex(tmpl['previousblockhash'])[::-1] + mrklroot + timestamp + a2b_hex(tmpl['bits'])[::-1] + nonce
    blk += varlenEncode(len(txlist))
    for tx in txlist:
        blk += tx
    return bytearray(blk)
def template_to_hex(tmpl, txlist):
    """Hex-encode the serialized block built from *tmpl* and *txlist*."""
    return b2x(template_to_bytearray(tmpl, txlist))
def assert_template(node, tmpl, txlist, expect):
    """Submit the block as a getblocktemplate 'proposal' and require the
    node's verdict to equal *expect* (None means the proposal is valid)."""
    rsp = node.getblocktemplate({'data':template_to_hex(tmpl, txlist),'mode':'proposal'})
    if rsp != expect:
        raise AssertionError('unexpected: %s' % (rsp,))
class GetBlockTemplateProposalTest(LipCoinTestFramework):
    '''
    Test block proposals with getblocktemplate.
    '''

    def __init__(self):
        super().__init__()
        self.num_nodes = 2
        self.setup_clean_chain = False

    def setup_network(self):
        """Start the nodes and connect them into a 2-node network."""
        self.nodes = self.setup_nodes()
        connect_nodes_bi(self.nodes, 0, 1)

    def run_test(self):
        """Fetch a template, then submit a series of deliberately corrupted
        proposals and check the node rejects each one for the right reason,
        ending with a valid block and an orphan."""
        node = self.nodes[0]
        node.generate(1) # Mine a block to leave initial block download
        tmpl = node.getblocktemplate()
        if 'coinbasetxn' not in tmpl:
            # Synthesize a minimal coinbase tx: height push + one output
            # paying the full coinbasevalue.
            rawcoinbase = encodeUNum(tmpl['height'])
            rawcoinbase += b'\x01-'
            hexcoinbase = b2x(rawcoinbase)
            hexoutval = b2x(pack('<Q', tmpl['coinbasevalue']))
            tmpl['coinbasetxn'] = {'data': '01000000' + '01' + '0000000000000000000000000000000000000000000000000000000000000000ffffffff' + ('%02x' % (len(rawcoinbase),)) + hexcoinbase + 'fffffffe' + '01' + hexoutval + '00' + '00000000'}
        txlist = list(bytearray(a2b_hex(a['data'])) for a in (tmpl['coinbasetxn'],) + tuple(tmpl['transactions']))

        # Test 0: Capability advertised
        assert('proposal' in tmpl['capabilities'])

        # NOTE: This test currently FAILS (regtest mode doesn't enforce block height in coinbase)
        ## Test 1: Bad height in coinbase
        #txlist[0][4+1+36+1+1] += 1
        #assert_template(node, tmpl, txlist, 'FIXME')
        #txlist[0][4+1+36+1+1] -= 1

        # Test 2: Bad input hash for gen tx
        txlist[0][4+1] += 1
        assert_template(node, tmpl, txlist, 'bad-cb-missing')
        txlist[0][4+1] -= 1

        # Test 3: Truncated final tx
        lastbyte = txlist[-1].pop()
        assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
        txlist[-1].append(lastbyte)

        # Test 4: Add an invalid tx to the end (duplicate of gen tx)
        txlist.append(txlist[0])
        assert_template(node, tmpl, txlist, 'bad-txns-duplicate')
        txlist.pop()

        # Test 5: Add an invalid tx to the end (non-duplicate)
        txlist.append(bytearray(txlist[0]))
        txlist[-1][4+1] = 0xff
        assert_template(node, tmpl, txlist, 'bad-txns-inputs-missingorspent')
        txlist.pop()

        # Test 6: Future tx lock time
        txlist[0][-4:] = b'\xff\xff\xff\xff'
        assert_template(node, tmpl, txlist, 'bad-txns-nonfinal')
        txlist[0][-4:] = b'\0\0\0\0'

        # Test 7: Bad tx count
        txlist.append(b'')
        assert_raises(JSONRPCException, assert_template, node, tmpl, txlist, 'n/a')
        txlist.pop()

        # Test 8: Bad bits
        realbits = tmpl['bits']
        tmpl['bits'] = '1c0000ff' # impossible in the real world
        assert_template(node, tmpl, txlist, 'bad-diffbits')
        tmpl['bits'] = realbits

        # Test 9: Bad merkle root
        rawtmpl = template_to_bytearray(tmpl, txlist)
        rawtmpl[4+32] = (rawtmpl[4+32] + 1) % 0x100
        rsp = node.getblocktemplate({'data':b2x(rawtmpl),'mode':'proposal'})
        if rsp != 'bad-txnmrklroot':
            raise AssertionError('unexpected: %s' % (rsp,))

        # Test 10: Bad timestamps
        realtime = tmpl['curtime']
        tmpl['curtime'] = 0x7fffffff
        assert_template(node, tmpl, txlist, 'time-too-new')
        tmpl['curtime'] = 0
        assert_template(node, tmpl, txlist, 'time-too-old')
        tmpl['curtime'] = realtime

        # Test 11: Valid block
        assert_template(node, tmpl, txlist, None)

        # Test 12: Orphan block
        tmpl['previousblockhash'] = 'ff00' * 16
        assert_template(node, tmpl, txlist, 'inconclusive-not-best-prevblk')
# Allow running this functional test directly from the command line.
if __name__ == '__main__':
    GetBlockTemplateProposalTest().main()
| [
"support@lipcoins.org"
] | support@lipcoins.org |
d20835a2b0ff1dc195aed11268bfdbedc3348f38 | 6dd3b5bb9424b18c64172dad6e07bb974f48cf6b | /stock/core/messages/registered_shelve.py | 11f4c738a43ce0fe3da6f8f3910bec0e3bb135bd | [
"Apache-2.0"
] | permissive | rh-tech-supermarket-restocking-demo/stock-backend | b8a081621445a890736532e5e01b4b4d71072b5e | 0d868b270c4142fd5efdad28966482c280478c85 | refs/heads/main | 2023-06-24T16:22:21.757472 | 2021-07-28T13:33:56 | 2021-07-28T13:33:56 | 380,600,682 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 636 | py | from dataclasses import dataclass
from dataclasses_json import dataclass_json, LetterCase
from stock.core.product import SKU, Category
from stock.core.shelve import RestockThreshold, ProductAmount, Shelve
@dataclass_json(letter_case=LetterCase.CAMEL)
@dataclass(frozen=True)
class RegisteredShelve:
    """Immutable message describing a registered shelve; dataclasses_json
    serializes its fields to camelCase JSON."""
    product_sku: SKU
    product_category: Category
    shelve_restock_threshold: RestockThreshold
    shelve_stock_amount: ProductAmount

    @classmethod
    def from_shelve(Cls, shelve: Shelve):
        """Build the message from a Shelve aggregate's product and stock state."""
        return Cls(
            shelve.product.sku, shelve.product.category,
            shelve.restock_threshold, shelve.stock_amount)
| [
"mkossatz@redhat.com"
] | mkossatz@redhat.com |
38d391543f66af5e1e9236ef60d90efb6883a886 | 29c56a53a0535c1a69cec9a6089daa36247d34a7 | /main/migrations/0008_auto_20210210_1250.py | 1d16502f78329ebfdc022c1a1923d26ec7297f14 | [] | no_license | maanasvi999/jobsearchapp | 1afd38405925f377bb0ee234023aa4acd7bf2f1c | 67196a42eb211faba2368941c7efa1d64f19b016 | refs/heads/master | 2023-03-02T23:36:27.516284 | 2021-02-12T21:48:15 | 2021-02-12T21:48:15 | 337,946,012 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,394 | py | # Generated by Django 3.1.4 on 2021-02-10 07:20
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: removes relation fields from several models,
    then deletes those models (ApplicantProfile, AppliedJobs, EmployerProfile,
    SavedJobs and the custom User)."""

    dependencies = [
        ('main', '0007_applicantprofile_appliedjobs_employerprofile_savedjobs_user'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='appliedjobs',
            name='job_company',
        ),
        migrations.RemoveField(
            model_name='appliedjobs',
            name='user',
        ),
        migrations.RemoveField(
            model_name='employerprofile',
            name='user',
        ),
        migrations.RemoveField(
            model_name='savedjobs',
            name='job_company',
        ),
        migrations.RemoveField(
            model_name='savedjobs',
            name='user',
        ),
        migrations.RemoveField(
            model_name='user',
            name='groups',
        ),
        migrations.RemoveField(
            model_name='user',
            name='user_permissions',
        ),
        migrations.DeleteModel(
            name='ApplicantProfile',
        ),
        migrations.DeleteModel(
            name='AppliedJobs',
        ),
        migrations.DeleteModel(
            name='EmployerProfile',
        ),
        migrations.DeleteModel(
            name='SavedJobs',
        ),
        migrations.DeleteModel(
            name='User',
        ),
    ]
| [
"maanasvi999@gmail.com"
] | maanasvi999@gmail.com |
5879088035742e55a4c0b12a3c6d8604f1bf5bac | 0f506c452e3864d4bdcce7332445c35dbeaf38a7 | /backend/crime_chain/Intel_SGX/apps.py | 0e5ac63c65d1772954ac13a70310cadd50c5e3e7 | [] | no_license | jurosutantra/CrimeChain | 848924a9ce4beed7509ab3823b73c62d06acb17b | aecd27a50ee9413297816da4dd47c103d56398eb | refs/heads/master | 2023-02-13T00:34:20.391688 | 2020-09-03T16:29:09 | 2020-09-03T16:29:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 92 | py | from django.apps import AppConfig
class IntelSgxConfig(AppConfig):
    """Django application configuration for the Intel_SGX app."""
    name = 'Intel_SGX'
| [
"esmonddsouza42@gmail.com"
] | esmonddsouza42@gmail.com |
3d3ed85bb76718a4e5973252aefc6b9b998ef6c6 | 0e478f3d8b6c323c093455428c9094c45de13bac | /src/OTLMOW/OTLModel/Datatypes/KlOmegaElementMateriaal.py | 0e4f888f71af1341513eee503beab2556145d36f | [
"MIT"
] | permissive | davidvlaminck/OTLMOW | c6eae90b2cab8a741271002cde454427ca8b75ba | 48f8c357c475da1d2a1bc7820556843d4b37838d | refs/heads/main | 2023-01-12T05:08:40.442734 | 2023-01-10T15:26:39 | 2023-01-10T15:26:39 | 432,681,113 | 3 | 1 | MIT | 2022-06-20T20:36:00 | 2021-11-28T10:28:24 | Python | UTF-8 | Python | false | false | 2,285 | py | # coding=utf-8
import random
from OTLMOW.OTLModel.Datatypes.KeuzelijstField import KeuzelijstField
from OTLMOW.OTLModel.Datatypes.KeuzelijstWaarde import KeuzelijstWaarde
# Generated with OTLEnumerationCreator. To modify: extend, do not edit
class KlOmegaElementMateriaal(KeuzelijstField):
    """Choice-list field for the materials of an omega element
    (orig.: "De gebruikte materialen van het omega-element.")."""
    naam = 'KlOmegaElementMateriaal'
    label = 'Omega element materiaal'
    objectUri = 'https://wegenenverkeer.data.vlaanderen.be/ns/onderdeel#KlOmegaElementMateriaal'
    definition = 'De gebruikte materialen van het omega-element.'
    status = 'ingebruik'
    codelist = 'https://wegenenverkeer.data.vlaanderen.be/id/conceptscheme/KlOmegaElementMateriaal'
    # Allowed values; 'definitie' texts are Dutch source data and must stay verbatim.
    options = {
        'aluminium': KeuzelijstWaarde(invulwaarde='aluminium',
                                      label='aluminium',
                                      status='ingebruik',
                                      definitie='Omega-element vervaarigd uit aluminium.',
                                      objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/aluminium'),
        'roestvrij-staal': KeuzelijstWaarde(invulwaarde='roestvrij-staal',
                                            label='roestvrij staal',
                                            status='ingebruik',
                                            definitie='Omega-element vervaarigd uit roestvrij staal.',
                                            objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/roestvrij-staal'),
        'verzinkt-staal': KeuzelijstWaarde(invulwaarde='verzinkt-staal',
                                           label='verzinkt staal',
                                           status='ingebruik',
                                           definitie='Omega-element vervaarigd uit verzinkt staal.',
                                           objectUri='https://wegenenverkeer.data.vlaanderen.be/id/concept/KlOmegaElementMateriaal/verzinkt-staal')
    }

    @classmethod
    def create_dummy_data(cls):
        """Return a random 'invulwaarde' among options whose status is 'ingebruik'."""
        return random.choice(list(map(lambda x: x.invulwaarde,
                                      filter(lambda option: option.status == 'ingebruik', cls.options.values()))))
| [
"david.vlaminck@mow.vlaanderen.be"
] | david.vlaminck@mow.vlaanderen.be |
ebad958140d23dddd740353748b51a374efc07f3 | ede0a087ea29305857fb4586e730ef1ebf068a86 | /Connection/ssh_connection.py | 8b4c66f0b9203fe0addc57dbe5172ce6e81ca8e2 | [] | no_license | manuel1801/Bachelor_Arbeit | f3e5e33aa57877c890890b8c3584c672cd038816 | 92200717fd34544f9bbea9aa7ad2156bf72080df | refs/heads/master | 2021-07-10T03:29:20.605528 | 2021-03-01T09:06:51 | 2021-03-01T09:06:51 | 219,782,897 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,720 | py | import requests
import json
import os
import sys
import pexpect
from time import sleep
import smtplib
from email.mime.text import MIMEText
class SSHConnect:
    """Manage a remote.it (remot3.it) proxied connection to a named device,
    plus helpers to copy a file over scp and to send an e-mail notification.

    Typical flow: login(device_name) -> connect() -> send(...) -> disconnect().
    """

    def __init__(self, email, password):
        # NOTE(review): hardcoded remote.it developer key — consider moving
        # this credential out of source control.
        self.developer_key = 'NEU3RTVFNEMtNjRGRi00MzBFLUIyNTgtOUVFQjRGMjcxOTRB'
        self.token = None         # auth token obtained by login()
        self.device_adress = None  # remote.it device address resolved by login()
        self.conn_id = None        # active proxy connection id set by connect()
        self.email = email         # used both for remote.it login and SMTP
        self.password = password
        # self.public_ip = requests.get('https://api.ipify.org').text

    def login(self, device_name, retry=5):
        """Authenticate against the remote.it API and resolve *device_name*
        to its device address.

        Retries the login POST up to *retry* times (0.5 s apart). Returns
        True on success, False on auth failure, network failure, or when no
        device with that alias exists.
        """
        headers = {
            "developerkey": self.developer_key
        }
        body = {
            "password": self.password,
            "username": self.email
        }
        url = "https://api.remot3.it/apv/v27/user/login"
        for i in range(retry):
            print('try to login for ' + str(i+1) + '. time')
            try:
                log_resp = requests.post(
                    url, data=json.dumps(body), headers=headers)
                break
            except:  # NOTE(review): bare except swallows everything, incl. KeyboardInterrupt
                print('login failed: ' + str(i+1) + '. try')
                if i == retry - 1:
                    return False
                sleep(0.5)
        log_resp = log_resp.json()
        # The API reports failure as the string 'false' in the status field.
        if log_resp['status'] == 'false':
            print('wrong remote.it user name or password')
            return False
        else:
            self.token = log_resp['token']
        headers = {
            "developerkey": self.developer_key,
            # Created using the login API
            "token": self.token
        }
        url = "https://api.remot3.it/apv/v27/device/list/all"
        try:
            dev_resp = requests.get(url, headers=headers)
        except:
            print('failed to get device list')
            return False
        dev_resp = dev_resp.json()
        # Resolve the human-readable alias to the internal device address.
        for device in dev_resp['devices']:
            if device['devicealias'] == device_name:
                self.device_adress = device['deviceaddress']
                return True
        print('Device: ', device_name, ' not Exist')
        return False

    def connect(self):
        """Open a proxy connection to the device resolved by login().

        Returns [host, port] parsed from the proxy URL on success, or False
        on any failure. Stores the connection id for disconnect().
        """
        if not self.token or not self.device_adress:
            print('token or device adress not found. login again')
            return False
        # The API wants the caller's public IP to set up the proxy.
        host_ip = requests.get('https://api.ipify.org').text
        # print('host ip is ', host_ip)
        headers = {
            "developerkey": self.developer_key,
            # Created using the login API
            "token": self.token
        }
        body = {
            "deviceaddress": self.device_adress,
            "wait": "true",
            "hostip": host_ip
            # "hostip": None
        }
        url = "https://api.remot3.it/apv/v27/device/connect"
        try:
            conn_resp = requests.post(
                url, data=json.dumps(body), headers=headers)
        except:
            print('conn req failed')
            return False
        conn_resp = conn_resp.json()
        if conn_resp['status'] == 'false':
            print('conn status false')
            return False
        self.conn_id = conn_resp['connectionid']
        # proxy looks like 'http://host:port' -> return ['host', 'port'].
        return conn_resp['connection']['proxy'].split('//')[1].split(':')

    def disconnect(self):
        """Tear down the proxy connection opened by connect().

        Returns False when there is nothing to disconnect; otherwise the
        API response is fetched but its result is not checked (returns None).
        """
        if not self.device_adress and not self.conn_id:
            print('no device to disconnect')
            return False
        headers = {
            "developerkey": self.developer_key,
            # Created using the login API
            "token": self.token
        }
        body = {
            "deviceaddress": self.device_adress,
            "connectionid": self.conn_id
        }
        url = "https://api.remot3.it/apv/v27/device/connect/stop"
        response = requests.post(url, data=json.dumps(body), headers=headers)
        response_body = response.json()

    def send(self, server, port, user, password, file, path):
        """Copy *file* to user@server:path over scp (through the proxy port),
        answering the password prompt via pexpect.

        Returns True on apparent success, False on EOF before the prompt or
        on any exception.
        """
        command = 'scp -o StrictHostKeyChecking=no -P {} {} {}@{}:{}'.format(
            port, file, user, server, path)
        try:
            child = pexpect.spawn(command)
            # Either scp asks for a password (index 0) or exits early (EOF).
            r = child.expect(
                ["{}@{}'s password:".format(user, server), pexpect.EOF])
            if r == 0:
                child.sendline(password)
                child.expect(pexpect.EOF)
                return True
            elif r == 1:
                print('end of file')
                return False
        except Exception as e:
            print(e)
            return False

    def send_email(self, email, text):
        """Send *text* to *email* via Gmail SMTP (STARTTLS on port 587),
        authenticating with the credentials given at construction."""
        msg = MIMEText(text)
        msg['Subject'] = 'Animal Detected'
        with smtplib.SMTP('smtp.gmail.com', 587) as smtp:
            smtp.ehlo()
            smtp.starttls()
            smtp.login(self.email, self.password)
            smtp.sendmail(self.email, email, msg.as_string())
def main():
email = ''
password_remote_divece = ''
password_remoteit = ''
user = ''
remote_user = ''
remote_divice_name = ''
remote_output_dir = os.path.join('/home', remote_user)
conn = SSHConnect(email, password_remoteit)
# Für SSH
file_path = os.path.join(os.path.dirname(sys.argv[0]), 'test.jpg')
assert os.path.isfile(file_path)
logged_in = conn.login(remote_divice_name)
if logged_in:
print('Success: logging in!')
ret = conn.connect()
else:
print('Error: logging in!')
exit()
server, port = ret
if conn.send(server, port, remote_user, password_remote_divece,
file_path, remote_output_dir):
print('Success: sending!')
else:
print('Error: sending!')
conn.disconnect()
# Für Email:
# conn.send_email('ziel@addresse.com', 'hello world!')
if __name__ == "__main__":
main()
| [
"manuel.barkey@web.de"
] | manuel.barkey@web.de |
18f933ca7f86ee5a21971b8b6c078922ac541672 | 8caf717cc19d1149a4dcd0ed14771b6a583ef016 | /time_sheet/migrations/0013_auto_20190302_1532.py | 608f7609e18416fe67413b0a88873708f413ef4b | [] | no_license | cmclaug3/CES | 2f2c2d2f682e7943498fbaae0249c23656e4f9f8 | f795354eda73949f7c5acfa9141a5a2c12e36332 | refs/heads/master | 2020-04-03T06:59:00.077974 | 2019-04-07T21:23:00 | 2019-04-07T21:23:00 | 153,554,739 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 375 | py | # Generated by Django 2.1.2 on 2019-03-02 22:32
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('time_sheet', '0012_auto_20190302_1509'),
]
operations = [
migrations.AlterField(
model_name='workday',
name='date',
field=models.DateField(),
),
]
| [
"coreymclaughlin@Coreys-MacBook-Pro.local"
] | coreymclaughlin@Coreys-MacBook-Pro.local |
bf7636f3f80aa31b41bfea8c5de09a9c2c78081e | be5e5aebd753ed1f376dc18ce411f0fac6d2f762 | /natuurpunt_purchase/__openerp__.py | 2c5775f49e8c2d135a4f1389ae4e637f0ac437cf | [] | no_license | smart-solution/natuurpunt-purchase | 7d9fcfdde769b6294d8dc705cecc99a177b4573c | 0ac94cb68cee4ef464158720e04007ee12036179 | refs/heads/master | 2021-05-22T04:43:21.594422 | 2020-11-02T13:32:27 | 2020-11-02T13:32:27 | 39,186,322 | 0 | 2 | null | 2020-11-02T13:32:28 | 2015-07-16T08:42:31 | Python | UTF-8 | Python | false | false | 1,548 | py | # -*- coding: utf-8 -*-
##############################################################################
#
# Smart Solution bvba
# Copyright (C) 2010-Today Smart Solution BVBA (<http://www.smartsolution.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
"name" : "natuurpunt_purchase",
"version" : "1.0",
"author" : "Smart Solution (fabian.semal@smartsolution.be)",
"website" : "www.smartsolution.be",
"category" : "Generic Modules/Base",
"description": """
""",
"depends" : ["purchase_requisition"],
"data" : [
'natuurpunt_purchase_view.xml',
'natuurpunt_purchase_data.xml',
'natuurpunt_purchase_report.xml',
'security/natuurpunt_purchase_security.xml',
# 'security/ir.model.access.csv'
],
"active": False,
"installable": True
}
| [
"fabian.semal@smartsolution.be"
] | fabian.semal@smartsolution.be |
f26c2293ca65d8c6dbbd1698a7bd712bc9b58434 | 800966018624753cbfff4cb4b0176950114e9ba8 | /myvenv/bin/wheel | f08c71349781ccf9f638c9a0e81b06a9aed9db61 | [] | no_license | caxapakaared/stepic | 802b2d212f0adb84264f60fddf3e964c93272b50 | 962e16007c3791a48776e42046194e9b86d996aa | refs/heads/master | 2021-01-01T16:41:58.650936 | 2017-07-21T01:35:21 | 2017-07-21T01:35:21 | 97,893,359 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 233 | #!/Users/Caxap/py/web/myvenv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.tool import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"rebusiztysyachidetaley@bk.ru"
] | rebusiztysyachidetaley@bk.ru | |
61cebe0f34302d9f3d0dc3118f23e318b837dc7f | 8bd7d5fac8c64ab7fc703cfefaf8b3680d225198 | /evaluation.py | 53bebc94aec46ecfbc9697153f0b64e4c1f4872f | [] | no_license | atsushi421/AlgorithmSimulator2 | 9625bf5086f41fa2251f5e97e6a5cf576eb21ef7 | 95f5e1b94e52915ecd2f02372d67a498fea28c44 | refs/heads/master | 2023-06-25T18:47:24.605948 | 2021-07-18T00:20:11 | 2021-07-18T00:20:11 | 369,368,965 | 0 | 0 | null | 2021-07-18T00:20:12 | 2021-05-21T00:14:30 | PostScript | UTF-8 | Python | false | false | 6,392 | py | # -*- coding: utf-8 -*-
import sys
from class_DAG import DAG
from class_DAG import one_entry_DAG
from class_ClusteredManyCore import ClusteredManyCoreProcesser
from HEFT import HEFT
from QLHEFT import QLHEFT
from class_Proposed import Proposed
from class_Q_learning import Q_learning
from class_Scheduler import Scheduler
class Evaluater:
# <コンストラクタ>
def __init__(self, args):
'''
ALGORITHM_NAME : 使用するアルゴリズム
EVA_NAME : 評価名
TARGET : 対象のプロセッサ
DAG : 対象のDAG
LOG_PATH : ログファイルへのパス
RESULT_PATH : 結果を書き込むファイルへのパス
'''
self.ALGORITHM_NAME = args[1]
self.EVA_NAME = args[2]
self.TARGET = ClusteredManyCoreProcesser(int(args[4]), int(args[5]), float(args[6]))
self.DAG = DAG(args[3])
self.update_dag(float(args[7]), float(args[8]))
self.LOG_PATH = "./result/" + self.EVA_NAME + "/" + self.ALGORITHM_NAME + "/log/" + self.DAG.file_name + "_" + str(self.TARGET.num_of_cluster) + "_" + str(self.TARGET.num_of_core) + "_" + ("{:.2f}".format(self.DAG.CCR)) + ".txt"
self.write_param()
self.RESULT_PATH = self.set_result_path(float(args[7]), float(args[8]))
self.evaluate()
# <メソッド>
# アルゴリズム名に基づいて評価を行う
def evaluate(self):
scheduling_list = []
if(self.ALGORITHM_NAME == "HEFT"):
scheduling_list = HEFT(self.DAG, self.TARGET)
scheduler = Scheduler(scheduling_list, self.DAG, self.TARGET)
# 結果を書き込み
f = open(self.RESULT_PATH, "a")
f.write(self.DAG.file_name + "\t" + str(scheduler.makespan()) + "\n")
f.close()
if(self.ALGORITHM_NAME == "QLHEFT"):
scheduling_list = QLHEFT(self.DAG, self.TARGET)
scheduler = Scheduler(scheduling_list, self.DAG, self.TARGET)
# 結果を書き込み
f = open(self.RESULT_PATH, "a")
f.write(self.DAG.file_name + "\t" + str(scheduler.makespan()) + "\n")
f.close()
if(self.ALGORITHM_NAME == "Proposed"):
proposed = Proposed(self.DAG, self.TARGET)
scheduling_list = proposed.best_scheduling_list()
scheduler = Scheduler(scheduling_list, self.DAG, self.TARGET)
# 結果を書き込み
f = open(self.RESULT_PATH, "a")
f.write(self.DAG.file_name + "\t" + str(scheduler.makespan()) + "\n")
f.close()
# FACTORに基づいてDAGを更新
def update_dag(self, factor_edge, factor_node):
for i in range(self.DAG.num_of_node):
for j in range(self.DAG.num_of_node):
self.DAG.edge[i][j] = int(self.DAG.edge[i][j] * factor_edge)
for i in range(self.DAG.num_of_node):
self.DAG.node[i] = int(self.DAG.node[i] * factor_node)
# rankuの再計算
self.DAG.ranku = [0] * self.DAG.num_of_node # 初期化
for i in range(self.DAG.num_of_node):
if(self.DAG.entry[i] == 1):
self.DAG.ranku_calc(i)
self.DAG.ccr_calc() # CCRの再計算
# 評価名に基づいて, result_path を決める
def set_result_path(self, factor_edge, factor_node):
if(self.EVA_NAME == "change_CCR"):
if(factor_edge == 0.8 and factor_node == 2):
return "./result/change_CCR/" + self.ALGORITHM_NAME + "/CCR_0.25.txt"
if(factor_edge == 1 and factor_node == 1.3):
return "./result/change_CCR/" + self.ALGORITHM_NAME + "/CCR_0.5.txt"
if(factor_edge == 1.5 and factor_node == 1):
return "./result/change_CCR/" + self.ALGORITHM_NAME + "/CCR_1.0.txt"
if(factor_edge == 2 and factor_node == 0.7):
return "./result/change_CCR/" + self.ALGORITHM_NAME + "/CCR_2.0.txt"
if(factor_edge == 3 and factor_node == 0.5):
return "./result/change_CCR/" + self.ALGORITHM_NAME + "/CCR_4.0.txt"
if(self.EVA_NAME == "change_InoutRatio"):
return "./result/change_InoutRatio/" + self.ALGORITHM_NAME + "/InoutRatio_" + str(self.TARGET.inout_ratio) + ".txt"
if(self.EVA_NAME == "change_NumCore"):
return "./result/change_NumCore/" + self.ALGORITHM_NAME + "/NumCore_" + str(self.TARGET.num_of_core) + ".txt"
if(self.EVA_NAME == "change_NumNode"):
if('20_' in self.DAG.file_name):
return "./result/change_NumNode/" + self.ALGORITHM_NAME + "/NumNode_20.txt"
if('50_' in self.DAG.file_name):
return "./result/change_NumNode/" + self.ALGORITHM_NAME + "/NumNode_50.txt"
if('100_' in self.DAG.file_name):
return "./result/change_NumNode/" + self.ALGORITHM_NAME + "/NumNode_100.txt"
if('200_' in self.DAG.file_name):
return "./result/change_NumNode/" + self.ALGORITHM_NAME + "/NumNode_200.txt"
if(self.EVA_NAME == "random"):
return "./result/random/" + self.ALGORITHM_NAME + "/random.txt"
# 評価パラメータをログに書き込む
def write_param(self):
f = open(self.LOG_PATH, "w")
f.write("<評価パラメータ>\n")
f.write("DAG_name : " + args[3] + ".tgff\n")
f.write("NUM_OF_CC : " + args[4] + "\n")
f.write("NUM_OF_CORE : " + args[5] + "\n")
f.write("inout_ratio : " + args[6] + "\n")
f.write("factor_edge : " + args[7] + "\n")
f.write("factor_node : " + args[8] + "\n")
f.write("CCR : " + str(self.DAG.CCR) + "\n")
f.write("\n")
f.close()
'''
args[1] : 使用するアルゴリズム. [HEFT, QLHEFT, Proposed]
args[2] : 評価名. [change_CCR, change_InoutRatio, change_NumCore, change_NumNode]
args[3] : 実行するDAGのファイル名
args[4] : クラスタ数
args[5] : 1クラスタ内のコア数
args[6] : クラスタ外の通信時間とクラスタ内の通信時間の比率
args[7] : すべてのedgeに掛ける係数
args[8] : すべてのnodeに掛ける係数
'''
args = sys.argv
Evaluater(args) | [
"a.yano.578@ms.saitama-u.ac.jp"
] | a.yano.578@ms.saitama-u.ac.jp |
07617650fec3c637bed8a6e5d3f7dab3d07d274f | 6aed8c33a2cdc7f8841b1f6f29fb8a152325e7f0 | /sql/PostgreSQL basics.py | 84fd739ef060f8149327e7362ba541fad9c6dd2a | [] | no_license | Mikemraz/Weapons-for-Data-Scientists | 0d188d8b19112fb837cd6e69d7d47a9a537df2f8 | 572a5598cfac702549180ebfae887c87ac5f4ba9 | refs/heads/master | 2020-04-22T23:49:00.683084 | 2019-03-23T21:46:57 | 2019-03-23T21:46:57 | 170,754,792 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 332 | py | import psycopg2 as pg
from sqlalchemy import create_engine
import pandas as pd
# import a pandas dataframe into postgres db.
df = pd.read_csv('calendar.csv')
table_name = 'calendar'
engine = create_engine('postgresql://postgres:jlm041544@localhost:5432/postgres')
con = engine.connect()
df.to_sql(table_name, engine)
conn.commit()
| [
"shazi0415@qq.com"
] | shazi0415@qq.com |
981bbfed69a5508f0cfab20fc831cfd657c03bfd | 690c4fd238926624c1d3fa594aeb9d7140618b5b | /day04/mysite4/mysite4/settings.py | b6283d1c8dc99f4cc72597551584c5d90b1ccbf3 | [] | no_license | dalaAM/month_04 | 66c4630a169294f4e4dca26c77989ad5879da2ca | 322532fedd095cd9307ee4f2633026debe56f551 | refs/heads/master | 2022-12-04T06:02:12.995054 | 2020-08-23T04:06:19 | 2020-08-23T04:06:19 | 286,018,771 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,203 | py | """
Django settings for mysite4 project.
Generated by 'django-admin startproject' using Django 2.2.13.
For more information on this file, see
https://docs.djangoproject.com/en/2.2/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/2.2/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/2.2/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'lc!7ik)7n=drgz5wna+v5$_oejjd&c9hr$i2y8ag#rz4!fj4co'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'bookstore',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
# 'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'mysite4.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
'django.contrib.auth.context_processors.auth',
'django.contrib.messages.context_processors.messages',
],
},
},
]
WSGI_APPLICATION = 'mysite4.wsgi.application'
# Database
# https://docs.djangoproject.com/en/2.2/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',
'NAME': 'mysite4',
'USER': 'root',
'PASSWORD': '123456',
'HOST': '127.0.0.1',
'PORT': '3306',
}
}
# Password validation
# https://docs.djangoproject.com/en/2.2/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'zh-Hans'
TIME_ZONE = 'Asia/Shanghai'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
| [
"1105504468@qq.com"
] | 1105504468@qq.com |
2a7b308b9a147c9384f1af15affa987a9c80bc18 | 78144baee82268a550400bbdb8c68de524adc68f | /Production/python/Autumn18/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8_ext1_cff.py | 0404dac24ac1af443c07c6d7567e3d26aecf82b0 | [] | no_license | tklijnsma/TreeMaker | e6989c03189b849aff2007bad22e2bfc6922a244 | 248f2c04cc690ef2e2202b452d6f52837c4c08e5 | refs/heads/Run2_2017 | 2023-05-26T23:03:42.512963 | 2020-05-12T18:44:15 | 2020-05-12T18:44:15 | 263,960,056 | 1 | 2 | null | 2020-09-25T00:27:35 | 2020-05-14T15:57:20 | null | UTF-8 | Python | false | false | 2,416 | py | import FWCore.ParameterSet.Config as cms
maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1) )
readFiles = cms.untracked.vstring()
secFiles = cms.untracked.vstring()
source = cms.Source ("PoolSource",fileNames = readFiles, secondaryFileNames = secFiles)
readFiles.extend( [
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/280000/97CF62B7-13A7-1144-9021-CDF16708F4B0.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/0DC1264B-98DD-054D-934F-B46D16AEA2DA.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/2C2BC671-C18E-FF47-947E-B293CD33BEE2.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/2CB0A228-DDDD-0946-A030-6B0ED1F50B8A.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/626CC6DD-7373-0A44-99B0-933D20F1088D.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/6F7C5F93-53F0-AE45-BA6B-A95CCDCBD59A.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/7E24A0DA-B32D-5D44-BAF0-7AE8C465D170.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/8FD11F87-C024-1042-A459-FCFDC8445277.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/92CF84BB-1255-3243-9E69-C4C05B8922D1.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/9CBFC750-9804-CF47-8FB7-9C862D1137F2.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/E0CDD379-4CDE-2E4C-8014-F9573A6E9943.root',
'/store/mc/RunIIAutumn18MiniAOD/WWW_4F_TuneCP5_13TeV-amcatnlo-pythia8/MINIAODSIM/102X_upgrade2018_realistic_v15_ext1-v2/90000/ED4F9CB6-82B7-054D-A20A-254A0AF0FED3.root',
] )
| [
"Alexx.Perloff@Colorado.edu"
] | Alexx.Perloff@Colorado.edu |
387622b9565cfcaa2fe10c694aeb971fe457181e | b22588340d7925b614a735bbbde1b351ad657ffc | /athena/MuonSpectrometer/MuonCnv/MuonByteStream/share/WriteMuonByteStream_jobOptions.py | a8c537456ede0b7ccc707e97e9cfe4a5455e6a66 | [] | no_license | rushioda/PIXELVALID_athena | 90befe12042c1249cbb3655dde1428bb9b9a42ce | 22df23187ef85e9c3120122c8375ea0e7d8ea440 | refs/heads/master | 2020-12-14T22:01:15.365949 | 2020-01-19T03:59:35 | 2020-01-19T03:59:35 | 234,836,993 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 221 | py | theApp.Dlls += [ "MuonByteStream" ]
StreamBS = Algorithm( "StreamBS" )
StreamBS.ItemList +=["4187#*"]
StreamBS.ItemList +=["4190#*"]
StreamBS.ItemList +=["4186#*"]
StreamBS.ItemList +=["4183#*"]
StreamBS.ForceRead=True
| [
"rushioda@lxplus754.cern.ch"
] | rushioda@lxplus754.cern.ch |
7e8a01806ffb18ebfea77ea92066a506c67188b9 | 418318089fe13bacdeb96878df932b79f399dcaf | /apocalypse/utils/deamonize.py | 687a7b02267f339a7fd7a4f63bbed12060651232 | [
"MIT"
] | permissive | dhoomakethu/apocalypse | 4fb3b0252493cc4bb3ee92606f9d2e35f4d798ec | cf8491998c1b5d9abca768c60fc7ed9258aa3c35 | refs/heads/master | 2021-01-14T11:07:08.837438 | 2016-09-21T07:18:27 | 2016-09-21T07:18:27 | 67,292,897 | 6 | 2 | null | null | null | null | UTF-8 | Python | false | false | 3,630 | py | """
@author: dhoomakethu
"""
from __future__ import absolute_import, unicode_literals
import sys
import os
import time
from signal import SIGTERM
# http://code.activestate.com/recipes/66012-fork-a-daemon-process-on-unix/
def deamonize(stdout='/dev/null', stderr=None, stdin='/dev/null',
pidfile=None, startmsg = 'started with pid %s' ):
"""
This forks the current process into a daemon.
The stdin, stdout, and stderr arguments are file names that
will be opened and be used to replace the standard file descriptors
in sys.stdin, sys.stdout, and sys.stderr.
These arguments are optional and default to /dev/null.
Note that stderr is opened unbuffered, so
if it shares a file with stdout then interleaved output
may not appear in the order that you expect.
"""
# Do first fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit first parent.
except OSError, e:
sys.stderr.write("fork #1 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Decouple from parent environment.
os.chdir("/")
os.umask(0)
os.setsid()
# Do second fork.
try:
pid = os.fork()
if pid > 0:
sys.exit(0) # Exit second parent.
except OSError, e:
sys.stderr.write("fork #2 failed: (%d) %s\n" % (e.errno, e.strerror))
sys.exit(1)
# Open file descriptors and print start message
if not stderr: stderr = stdout
si = file(stdin, 'r')
so = file(stdout, 'a+')
se = file(stderr, 'a+', 0)
pid = str(os.getpid())
sys.stderr.write("\n%s\n" % startmsg % pid)
sys.stderr.flush()
if pidfile:
file(pidfile,'w+').write("%s\n" % pid)
# Redirect standard file descriptors.
os.dup2(si.fileno(), sys.stdin.fileno())
os.dup2(so.fileno(), sys.stdout.fileno())
os.dup2(se.fileno(), sys.stderr.fileno())
def startstop(stdout='/dev/null', stderr=None, stdin='/dev/null',
pidfile='pid.txt', startmsg = 'started with pid %s',
action='start'):
if action:
try:
pf = file(pidfile,'r')
pid = int(pf.read().strip())
pf.close()
except IOError:
pid = None
if 'stop' == action or 'restart' == action:
if not pid:
mess = "Could not stop, pid file '%s' missing.\n"
sys.stderr.write(mess % pidfile)
if 'stop' == action:
sys.exit(1)
action = 'start'
pid = None
else:
try:
while 1:
os.kill(pid,SIGTERM)
time.sleep(1)
except OSError, err:
err = str(err)
if err.find("No such process") > 0:
os.remove(pidfile)
if 'stop' == action:
sys.exit(0)
action = 'start'
pid = None
else:
print str(err)
sys.exit(1)
if 'start' == action:
if pid:
mess = "Start aborted since pid file '%s' exists.\n"
sys.stderr.write(mess % pidfile)
sys.exit(1)
deamonize(stdout, stderr, stdin, pidfile, startmsg)
return
if 'status' == action:
if not pid:
sys.stderr.write('Status: Stopped\n')
else: sys.stderr.write('Status: Running\n')
sys.exit(0)
| [
"sanjay@riptideio.com"
] | sanjay@riptideio.com |
2afa300a8883684e7f49ad027fecedf2bf89b631 | 78e8e0560e76bc7d30508441c01950767e6cba1d | /apps/services/urls.py | 334783a4c9af7b2f6491c6378854e19709d287ff | [] | no_license | alainalberto/save | c95106d2e23f5e737ca2e01ed801feb1793cc1a8 | 53c2973442ea91873aa41a9c6eda2c847a461dfd | refs/heads/master | 2021-04-29T19:43:50.021689 | 2018-02-15T01:32:03 | 2018-02-15T01:32:03 | 121,581,360 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,124 | py | from django.conf.urls import *
from django.contrib.auth.decorators import login_required, permission_required
from apps.services.views import *
from apps.services.components.ServicePDF import *
urlpatterns = [
url(r'^service/pending/$', login_required((PendingListPDF)),name='pending_pdf'),
url(r'^email/(?P<pk>\d+)&(?P<fl>[^/]+)/$', login_required((EmailSend)),name='email_send'),
#Permit
url(r'^permit/view/(?P<pk>\d+)&(?P<popup>[^/]+)/$', login_required(permission_required('services.add_permit')(PermitView)), name='permit'),
url(r'^permit/create$', login_required(permission_required('services.add_permit')(PermitCreate.as_view())), name='permit_create'),
url(r'^permit/create/(?P<pk>\d+)&(?P<popup>[^/]+)/$', login_required(permission_required('services.add_permit')(PermitCreate.as_view())), name='permit_create_popup'),
url(r'^permit/edit/(?P<pk>\d+)/$', login_required(permission_required('services.change_permit')(PermitEdit.as_view())), name='permit_edit'),
url(r'^permit/edit/(?P<pk>\d+)&(?P<popup>[^/]+)/$', login_required(permission_required('services.change_permit')(PermitEdit.as_view())), name='permit_edit_popup'),
url(r'^permit/(?P<pk>\d+)/$', login_required(permission_required('services.delete_permit')(PermitDelete.as_view())), name='permit_delete'),
#Forms
url(r'^forms/$', login_required(FormView.as_view()), name='forms'),
url(r'^forms/create$', login_required(permission_required('tools.add_file')(FormCreate.as_view())), name='file_create'),
url(r'^forms/edit/(?P<pk>\d+)/$', login_required(permission_required('tools.change_file')(FormEdit.as_view())), name='file_edit'),
url(r'^forms/edit/(?P<pk>\d+)&(?P<popup>[^/]+)/$', login_required(permission_required('tools.change_file')(FormEdit.as_view())), name='file_edit_popup'),
url(r'^forms/(?P<pk>\d+)/$', login_required(permission_required('tools.delete_file')(FormDelete.as_view())), name='file_delete'),
#Folder
url(r'^folder/$', login_required(permission_required('tools.add_file')(FolderView.as_view())), name='folder'),
url(r'^folder/create$', login_required(permission_required('tools.add_file')(FolderCreate.as_view())), name='folder_create'),
url(r'^folder/create/(?P<pk>\d+)/$', login_required(permission_required('tools.add_file')(FolderCreate.as_view())), name='folder_create_popup'),
url(r'^folder/edit/(?P<pk>\d+)/$', login_required(permission_required('tools.add_file')(FolderEdit.as_view())), name='folder_edit'),
url(r'^folder/edit/(?P<pk>\d+)&(?P<popup>[^/]+)/$',login_required(permission_required('tools.add_file')(FolderEdit.as_view())), name='folder_edit_popup'),
url(r'^folder/(?P<pk>\d+)/$', login_required(permission_required('tools.add_file')(FolderDelete.as_view())), name='folder_delete'),
#Equipment
url(r'^equipment/view/(?P<pk>\d+)&(?P<popup>[^/]+)/$', login_required(permission_required('services.add_equipment')(EquipmentView)), name='equipment'),
url(r'^equipment/create$', login_required(permission_required('services.add_equipment')(EquipmentCreate.as_view())), name='equipment_create'),
url(r'^equipment/create/(?P<pk>\d+)&(?P<popup>[^/]+)/$',
login_required(permission_required('services.add_equipment')(EquipmentCreate.as_view())), name='equipment_create_popup'),
url(r'^equipment/edit/(?P<pk>\d+)/$',
login_required(permission_required('services.change_equipment')(EquipmentEdit.as_view())), name='equipment_edit'),
url(r'^equipment/edit/(?P<pk>\d+)&(?P<popup>[^/]+)/$',
login_required(permission_required('services.change_equipment')(EquipmentEdit.as_view())), name='equipment_edit_popup'),
url(r'^equipment/(?P<pk>\d+)/$', login_required(permission_required('services.delete_equipment')(EquipmentDelete.as_view())),
name='equipment_delete'),
#Insurance
url(r'^insurance/view/(?P<pk>\d+)&(?P<popup>[^/]+)/$', login_required(permission_required('services.add_insurance')(InsuranceView)), name='insurance'),
url(r'^insurance/create$', login_required(permission_required('services.add_insurance')(InsuranceCreate.as_view())), name='insurance_create'),
url(r'^insurance/edit/(?P<pk>\d+)/$', login_required(permission_required('services.change_insurance')(InsuranceEdit.as_view())), name='insurance_edit'),
url(r'^insurance/(?P<pk>\d+)/$', login_required(permission_required('services.delete_insurance')(InsuranceDelete.as_view())), name='insurance_delete'),
#Driver
url(r'^driver/view/(?P<pk>\d+)&(?P<popup>[^/]+)/$',login_required(permission_required('services.add_driver')(DriverView)), name='driver'),
url(r'^driver/create$', login_required(permission_required('services.add_driver')(DriverCreate.as_view())),name='driver_create'),
url(r'^driver/edit/(?P<pk>\d+)/$',login_required(permission_required('services.change_driver')(DriverEdit.as_view())),name='driver_edit'),
url(r'^driver/(?P<pk>\d+)/$',login_required(permission_required('services.delete_driver')(DriverDelete.as_view())),name='driver_delete'),
#Ifta
url(r'^ifta/view/(?P<pk>\d+)&(?P<popup>[^/]+)/$',login_required(permission_required('services.add_ifta')(IftaView)), name='ifta'),
url(r'^ifta/create$', login_required(permission_required('services.add_ifta')(IftaCreate.as_view())), name='ifta_create'),
url(r'^ifta/edit/(?P<pk>\d+)/$',login_required(permission_required('services.change_ifta')(IftaEdit.as_view())), name='ifta_edit'),
url(r'^ifta/(?P<pk>\d+)/$', login_required(permission_required('services.delete_ifta')(IftaDelete.as_view())), name='ifta_delete'),
#Audit
url(r'^audit/view/(?P<pk>\d+)&(?P<popup>[^/]+)/$',login_required(permission_required('services.add_audit')(AuditView)), name='audit'),
url(r'^audit/create$', login_required(permission_required('services.add_audit')(AuditCreate.as_view())),name='audit_create'),
url(r'^audit/edit/(?P<pk>\d+)/$',login_required(permission_required('services.change_audit')(AuditEdit.as_view())),name='audit_edit'),
url(r'^audit/(?P<pk>\d+)/$',login_required(permission_required('services.delete_audit')(AuditDelete.as_view())),name='audit_delete'),
] | [
"alainalberto03@gmail.com"
] | alainalberto03@gmail.com |
235b0d7e97c24574ab59397ad07507f0a41dccd3 | 45d515a0e33794e7c46a3ad7e1cfdf3ac6c2ee83 | /collector.py | 75168f49016e4b9e35ec36b52b159adbb814a41a | [
"MIT"
] | permissive | djcarter85/Fantasy-Premier-League | 12b2aaef62c5bc4e0656b83572c2ff9087aa4238 | 46a8e72b80b34a1afe3d7a9c9b4f8ad0cba48b7e | refs/heads/master | 2021-07-03T13:04:05.621833 | 2020-12-21T17:16:41 | 2020-12-21T17:16:41 | 201,034,915 | 1 | 0 | NOASSERTION | 2019-08-07T11:16:27 | 2019-08-07T11:16:26 | null | UTF-8 | Python | false | false | 4,066 | py | import os
import sys
import csv
def get_teams(directory):
teams = {}
fin = open(directory + "/teams.csv", 'rU')
reader = csv.DictReader(fin)
for row in reader:
teams[int(row['id'])] = row['name']
return teams
def get_fixtures(directory):
fixtures_home = {}
fixtures_away = {}
fin = open(directory + "/fixtures.csv", 'rU')
reader = csv.DictReader(fin)
for row in reader:
fixtures_home[int(row['id'])] = int(row['team_h'])
fixtures_away[int(row['id'])] = int(row['team_a'])
return fixtures_home, fixtures_away
def get_positions(directory):
positions = {}
names = {}
pos_dict = {'1': "GK", '2': "DEF", '3': "MID", '4': "FWD"}
fin = open(directory + "/players_raw.csv", 'rU',encoding="utf-8")
reader = csv.DictReader(fin)
for row in reader:
positions[int(row['id'])] = pos_dict[row['element_type']]
names[int(row['id'])] = row['first_name'] + ' ' + row['second_name']
return names, positions
def get_expected_points(gw, directory):
xPoints = {}
fin = open(os.path.join(directory, 'xP' + str(gw) + '.csv'), 'rU')
reader = csv.DictReader(fin)
for row in reader:
xPoints[int(row['id'])] = row['xP']
return xPoints
def merge_gw(gw, gw_directory):
merged_gw_filename = "merged_gw.csv"
gw_filename = "gw" + str(gw) + ".csv"
gw_path = os.path.join(gw_directory, gw_filename)
fin = open(gw_path, 'rU', encoding="utf-8")
reader = csv.DictReader(fin)
fieldnames = reader.fieldnames
fieldnames += ["GW"]
rows = []
for row in reader:
row["GW"] = gw
rows += [row]
out_path = os.path.join(gw_directory, merged_gw_filename)
fout = open(out_path,'a', encoding="utf-8")
writer = csv.DictWriter(fout, fieldnames=fieldnames, lineterminator='\n')
print(gw)
if gw == 1:
writer.writeheader()
for row in rows:
writer.writerow(row)
def collect_gw(gw, directory_name, output_dir):
    """Assemble one gameweek CSV by scanning every player's ``gw.csv``.

    Walks ``./<directory_name>`` for per-player ``gw.csv`` files, keeps the
    rows belonging to gameweek ``gw`` and enriches them with the player's
    name, position, team (resolved via the fixture id) and expected points,
    then writes ``gw<gw>.csv`` into ``output_dir``.
    """
    rows = []
    fieldnames = []
    # NOTE(review): season path is hard-coded — must be updated each season.
    root_directory_name = "data/2020-21/"
    fixtures_home, fixtures_away = get_fixtures(root_directory_name)
    teams = get_teams(root_directory_name)
    names, positions = get_positions(root_directory_name)
    xPoints = get_expected_points(gw, output_dir)
    for root, dirs, files in os.walk(u"./" + directory_name):
        for fname in files:
            if fname == 'gw.csv':
                fpath = os.path.join(root, fname)
                fin = open(fpath, 'rU')
                reader = csv.DictReader(fin)
                # Last file scanned wins; all player files share one schema.
                fieldnames = reader.fieldnames
                for row in reader:
                    if int(row['round']) == gw:
                        # Player id is the trailing part of the directory name
                        # (e.g. ".../Mohamed_Salah_253" -> 253). Shadows the
                        # builtin `id`, kept as-is.
                        id = int(os.path.basename(root).split('_')[-1])
                        name = names[id]
                        position = positions[id]
                        fixture = int(row['fixture'])
                        # 'was_home' may arrive as a bool or the string "True".
                        if row['was_home'] == True or row['was_home'] == "True":
                            row['team'] = teams[fixtures_home[fixture]]
                        else:
                            row['team'] = teams[fixtures_away[fixture]]
                        row['name'] = name
                        row['position'] = position
                        row['xP'] = xPoints[id]
                        rows += [row]
    # Put the enrichment columns first in the output file.
    fieldnames = ['name', 'position', 'team', 'xP'] + fieldnames
    outf = open(os.path.join(output_dir, "gw" + str(gw) + ".csv"), 'w', encoding="utf-8")
    writer = csv.DictWriter(outf, fieldnames=fieldnames, lineterminator='\n')
    writer.writeheader()
    for row in rows:
        writer.writerow(row)
def collect_all_gws(directory_name, output_dir):
    """Collect per-player rows into per-gameweek CSVs for gameweeks 1-4."""
    for gw in range(1, 5):
        collect_gw(gw, directory_name, output_dir)
def merge_all_gws(num_gws, gw_directory):
    """Merge gameweeks 1 .. num_gws-1 into a single merged_gw.csv."""
    for gw in range(1, num_gws):
        merge_gw(gw, gw_directory)
def main():
    """CLI entry point: merge gameweeks 1..argv[1]-1 found in directory argv[2]."""
    #collect_all_gws(sys.argv[1], sys.argv[2])
    merge_all_gws(int(sys.argv[1]), sys.argv[2])
    #collect_gw(39, sys.argv[1], sys.argv[2])
if __name__ == '__main__':
main()
| [
"vaastav.anand05@gmail.com"
] | vaastav.anand05@gmail.com |
d54924e746d3c43eb63c909fd4df6c370922a12d | 401b2bce75ea062b5b3f729d6ee7f7ee382a7e3a | /WORK/pp,pPb_5TeV_V0_FINNAL/pp/ppLb2.py | 7810ec7cc6eed2a6a79f871c6a127c346a7a7c3a | [] | no_license | JustIonize/JustIonize | 777835a8df97fe3a9d87c3609e0667f25d20387c | bece32ef3275bb641df27c34bdd23a0113b63c87 | refs/heads/master | 2021-06-23T00:47:01.322786 | 2021-05-20T08:12:24 | 2021-05-20T08:12:24 | 223,455,203 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,714 | py | import sys, time
from ROOT import TMinuit
import numpy as np
import ROOT
import matplotlib.pyplot as plt
import ctypes
DATA = 'ppLb2.txt'
Xnew, DeltaXnew, Y1, DeltaY1, X, DeltaX, Y2, DeltaY2, Y3, DeltaY3, Y4, DeltaY4 = np.loadtxt(DATA, unpack=True)
xnew1 = [0]*11
exnew1 = [0]*11
y11 = [0]*11
ey11 = [0]*11
for i in range(0,11):
xnew1[i] = Xnew[i]
exnew1[i] = DeltaXnew[i]
y11[i] = Y1[i]
ey11[i] = DeltaY1[i]
print(xnew1[i],' ',Xnew[i])
xnew = np.array(xnew1)
exnew = np.array(exnew1)
y1 = np.array(y11)
ey1 = np.array(ey11)
x = np.array(X)
ex = np.array(DeltaX)
y2 = np.array(Y2)
ey2 = np.array(DeltaY2)
y3 = np.array(Y3)
ey3 = np.array(DeltaY3)
y4 = np.array(Y4)
ey4 = np.array(DeltaY4)
nChan1 = len(xnew)
nChan = len(x)
mk = 1.11568 #GeV L, Lbar
nPar = 3
nCHAN = 150
length = 7.5
'''
#Tsallis distr
'''
def Tsallis(pT, par, mass=1.11568):
    """Tsallis transverse-momentum spectrum evaluated at ``pT``.

    ``par`` = (Area, Temper, Q): normalisation, temperature (GeV) and the
    non-extensivity parameter q. ``mass`` defaults to the value of the
    module-level ``mk`` (Lambda mass in GeV), so existing two-argument
    calls are unchanged; passing it explicitly generalises the fit
    function to other particle species.
    """
    Area, Temper, Q = par[0], par[1], par[2]
    mT = (mass ** 2 + pT ** 2) ** 0.5  # transverse mass
    return Area * pT * pow(1 - (1 - Q) * (mT - mass) / Temper, 1 / (1 - Q))
#------------------------------------------------------------------------------------------------------1
def FCNchi1(npar, gin, f, par, iflag):
    """TMinuit FCN: chi-square of the Tsallis fit to the first spectrum.

    The signature is fixed by ROOT's TMinuit callback protocol; only
    ``par`` (current fit parameters) and ``f`` (output: chi-square) are
    used. Reads the module-level data arrays ``xnew``/``y1``/``ey1``.
    """
    global valFCN1  # cached so the script can report chi2/NDF after the fit
    yTheor = np.array([Tsallis(i, par) for i in xnew])
    indPos = y1 > 0  # skip non-positive bins (avoids dividing noise/zeros)
    arrayFCN = ( (y1[indPos] - yTheor[indPos])/ey1[indPos] )**2
    valFCN1 = np.sum(arrayFCN)
    f.value = valFCN1
'''
#MIUNIT
'''
minuit1 = ROOT.TMinuit(5)
minuit1.SetPrintLevel(1)
minuit1.SetFCN(FCNchi1)
errordef = 1.
# Chi square start parameters
minuit1.DefineParameter(0, 'Area', 3.2, 1e-4, 0., 0.)
minuit1.DefineParameter(1, 'Temper', 0.21, 1e-4, 0., 0.)
minuit1.DefineParameter(2, 'Q', 1.14, 1e-3, 0., 0.)
ierflg = ctypes.c_int(0)
minuit1.mncomd("SET ERR " + str(1), ierflg)
minuit1.mncomd("SET STR 1", ierflg)
minuit1.mncomd("MIGRAD 100000 1e-8", ierflg)
NDF1 = nChan1 - minuit1.GetNumFreePars()
print("\nChi/NDF = ", valFCN1, '/', NDF1)
valPar1 = ctypes.c_double(0)
errPar1 = ctypes.c_double(0)
parFit1 = np.zeros(5)
parErr1 = np.zeros(5)
for i in range(nPar):
minuit1.GetParameter(i, valPar1, errPar1)
parFit1[i] = valPar1.value
parErr1[i] = errPar1.value
X1 = np.linspace(0, length, nCHAN)
dx1 = X1[1] - X1[0]
DeltaX1 = [dx1]*len(X1)
Y_1 = np.array([Tsallis(i, parFit1) for i in X1])
Ynew1 = np.array([Tsallis(i, parFit1)*i**2 for i in X1])
print('\n \n \n \n')
#------------------------------------------------------------------------------------------------------2
def FCNchi2(npar, gin, f, par, iflag):
    """TMinuit FCN: chi-square of the Tsallis fit to the second spectrum.

    Same TMinuit callback shape as FCNchi1, but fits ``y2``/``ey2`` on the
    common ``x`` grid.
    """
    global valFCN2  # cached so the script can report chi2/NDF after the fit
    yTheor = np.array([Tsallis(i, par) for i in x])
    indPos = y2 > 0  # skip non-positive bins
    arrayFCN = ( (y2[indPos] - yTheor[indPos])/ey2[indPos] )**2
    valFCN2 = np.sum(arrayFCN)
    f.value = valFCN2
'''
#MIUNIT
'''
minuit2 = ROOT.TMinuit(5)
minuit2.SetPrintLevel(1)
minuit2.SetFCN(FCNchi2)
errordef = 1.
# Chi square start parameters
minuit2.DefineParameter(0, 'Area', 3.2, 1e-4, 0., 0.)
minuit2.DefineParameter(1, 'Temper', 0.2, 1e-4, 0., 0.)
minuit2.DefineParameter(2, 'Q', 1.14, 1e-3, 0., 0.)
ierflg = ctypes.c_int(0)
minuit2.mncomd("SET ERR " + str(1), ierflg)
minuit2.mncomd("SET STR 1", ierflg)
minuit2.mncomd("MIGRAD 100000 1e-8", ierflg)
NDF2 = nChan - minuit2.GetNumFreePars()
print("\nChi/NDF = ", valFCN2, '/', NDF2)
valPar2 = ctypes.c_double(0)
errPar2 = ctypes.c_double(0)
parFit2 = np.zeros(5)
parErr2 = np.zeros(5)
for i in range(nPar):
minuit2.GetParameter(i, valPar2, errPar2)
parFit2[i] = valPar2.value
parErr2[i] = errPar2.value
X2 = np.linspace(0, length, nCHAN)
dx2 = X2[1] - X2[0]
DeltaX2 = [dx2]*len(X2)
Y_2 = np.array([Tsallis(i, parFit2) for i in X2])
Ynew2 = np.array([Tsallis(i, parFit2)*i**2 for i in X2])
print('\n \n \n \n')
#------------------------------------------------------------------------------------------------------3
def FCNchi3(npar, gin, f, par, iflag):
    """TMinuit FCN: chi-square of the Tsallis fit to the third spectrum.

    Same TMinuit callback shape as FCNchi1, but fits ``y3``/``ey3`` on the
    common ``x`` grid.
    """
    global valFCN3  # cached so the script can report chi2/NDF after the fit
    yTheor = np.array([Tsallis(i, par) for i in x])
    indPos = y3 > 0  # skip non-positive bins
    arrayFCN = ( (y3[indPos] - yTheor[indPos])/ey3[indPos] )**2
    valFCN3 = np.sum(arrayFCN)
    f.value = valFCN3
'''
#MIUNIT
'''
minuit3 = ROOT.TMinuit(5)
minuit3.SetPrintLevel(1)
minuit3.SetFCN(FCNchi3)
errordef = 1.
# Chi square start parameters
minuit3.DefineParameter(0, 'Area', 2.9, 1e-4, 0., 0.)
minuit3.DefineParameter(1, 'Temper', 0.2, 1e-4, 0., 0.)
minuit3.DefineParameter(2, 'Q', 1.13, 1e-3, 0., 0.)
ierflg = ctypes.c_int(0)
minuit3.mncomd("SET ERR " + str(1), ierflg)
minuit3.mncomd("SET STR 1", ierflg)
minuit3.mncomd("MIGRAD 100000 1e-8", ierflg)
NDF3 = nChan - minuit3.GetNumFreePars()
print("\nChi/NDF = ", valFCN3, '/', NDF3)
valPar3 = ctypes.c_double(0)
errPar3 = ctypes.c_double(0)
parFit3 = np.zeros(5)
parErr3 = np.zeros(5)
for i in range(nPar):
minuit3.GetParameter(i, valPar3, errPar3)
parFit3[i] = valPar3.value
parErr3[i] = errPar3.value
X3 = np.linspace(0, length, nCHAN)
dx3 = X3[1] - X3[0]
DeltaX3 = [dx3]*len(X3)
Y_3 = np.array([Tsallis(i, parFit3) for i in X3])
Ynew3 = np.array([Tsallis(i, parFit3)*i**2 for i in X3])
print('\n \n \n \n')
#------------------------------------------------------------------------------------------------------4
def FCNchi4(npar, gin, f, par, iflag):
    """TMinuit FCN: chi-square of the Tsallis fit to the fourth spectrum.

    Same TMinuit callback shape as FCNchi1, but fits ``y4``/``ey4`` on the
    common ``x`` grid.
    """
    global valFCN4  # cached so the script can report chi2/NDF after the fit
    yTheor = np.array([Tsallis(i, par) for i in x])
    indPos = y4 > 0  # skip non-positive bins
    arrayFCN = ( (y4[indPos] - yTheor[indPos])/ey4[indPos] )**2
    valFCN4 = np.sum(arrayFCN)
    f.value = valFCN4
'''
#MIUNIT
'''
minuit4 = ROOT.TMinuit(5)
minuit4.SetPrintLevel(1)
minuit4.SetFCN(FCNchi4)
errordef = 1.
# Chi square start parameters
minuit4.DefineParameter(0, 'Area', 7, 1e-4, 0., 0.)
minuit4.DefineParameter(1, 'Temper', 0.1, 1e-3, 0., 0.)
minuit4.DefineParameter(2, 'Q', 1.13, 1e-3, 0., 0.)
ierflg = ctypes.c_int(0)
minuit4.mncomd("SET ERR " + str(1), ierflg)
minuit4.mncomd("SET STR 1", ierflg)
minuit4.mncomd("MIGRAD 100000 1e-8", ierflg)
NDF4 = nChan - minuit4.GetNumFreePars()
print("\nChi/NDF = ", valFCN4, '/', NDF4)
valPar4 = ctypes.c_double(0)
errPar4 = ctypes.c_double(0)
parFit4 = np.zeros(5)
parErr4 = np.zeros(5)
for i in range(nPar):
minuit4.GetParameter(i, valPar4, errPar4)
parFit4[i] = valPar4.value
parErr4[i] = errPar4.value
X4 = np.linspace(0, length, nCHAN)
dx4 = X4[1] - X4[0]
DeltaX4 = [dx4]*len(X4)
Y_4 = np.array([Tsallis(i, parFit4) for i in X4])
Ynew4 = np.array([Tsallis(i, parFit4)*i**2 for i in X4])
print('\n \n \n \n')
#-------------------------------------------------------------------------------------------------------
'''
AREAS
'''
def findArea(x, xerr, y):  # find an area under histogram
    """Rectangular-sum area of a histogram: sum over bins of
    (full bin width = 2 * half-width) * bin content."""
    return sum(2 * xerr[i] * y[i] for i in range(len(x)))
#normal areas
A1 = findArea(xnew, exnew, y1)
print('\nnormal areas 1\n',A1)
#normal areas with X
A_1 = findArea(X1, DeltaX1, Y_1)
print('normal areas with X 1\n',A_1)
# pT**2 * f(pT) areas
Anew1 = findArea(X1, DeltaX1, Ynew1)
print('pT**2 * f(pT) areas 1\n',Anew1)
'''
T init
'''
Tinit1 = np.sqrt( (Anew1/A_1)/2 )
print('<pT**2> 1\n',Anew1/A_1)
print('T init 1\n',Tinit1)
#print('\n DATA \n',DATA, '\n \n')
#normal areas
A2 = findArea(x, ex, y2)
print('\n\nnormal areas 2\n',A2)
#normal areas with X
A_2 = findArea(X2, DeltaX2, Y_2)
print('normal areas with X 2\n',A_2)
# pT**2 * f(pT) areas
Anew2 = findArea(X2, DeltaX2, Ynew2)
print('pT**2 * f(pT) areas 2\n',Anew2)
'''
T init
'''
Tinit2 = np.sqrt( (Anew2/A_2)/2 )
print('<pT**2> 2\n',Anew2/A_2)
print('T init 2\n',Tinit2)
#print('\n DATA \n',DATA, '\n \n')
#normal areas
A3 = findArea(x, ex, y3)
print('\n \nnormal areas 3\n',A3)
#normal areas with X
A_3 = findArea(X3, DeltaX3, Y_3)
print('normal areas with X 3\n',A_3)
# pT**2 * f(pT) areas
Anew3 = findArea(X3, DeltaX3, Ynew3)
print('pT**2 * f(pT) areas 3\n',Anew3)
'''
T init
'''
Tinit3 = np.sqrt( (Anew3/A_3)/2 )
print('<pT**2> 3\n',Anew3/A_3)
print('T init 3\n',Tinit3)
#print('\n DATA \n',DATA, '\n \n')
#normal areas
A4 = findArea(x, ex, y4)
print('\n \nnormal areas 4\n',A4)
#normal areas with X
A_4 = findArea(X4, DeltaX4, Y_4)
print('normal areas with X 4\n',A_4)
# pT**2 * f(pT) areas
Anew4 = findArea(X4, DeltaX4, Ynew4)
print('pT**2 * f(pT) areas 4\n',Anew4)
'''
T init
'''
Tinit4 = np.sqrt( (Anew4/A_4)/2 )
print('<pT**2> 4\n',Anew4/A_4)
print('T init 4\n',Tinit4)
#print('\n DATA \n',DATA, '\n \n')
#-------------------------------------------------------------------------------------------------------
'''
PLOT
'''
ROOT.gStyle.SetOptStat(0)
Plot1 = ROOT.TGraphErrors(nChan1, xnew, y1, exnew, ey1)
Plot1.SetMarkerStyle(20)
Plot1.SetMarkerColor(ROOT.kRed)
Plot1.SetMarkerSize(1.1)
Plot1.SetLineWidth(3)
Plot1.GetXaxis().SetTitle('p_{T} [GeV/c]')
Plot1.GetXaxis().SetTitleSize(0.05)
Plot1.GetXaxis().SetTitleOffset(0.85)
#Plot1.GetYaxis().SetTitle(' \\frac{\partial \sigma}{ \partial p_{T}} [\\frac{mb}{GeV/c}]')
Plot1.GetYaxis().SetTitle(' \\frac{\partial^{2} \sigma}{ \partial p_{T} \partial y} [\\frac{mb}{GeV/c}]')
Plot1.GetYaxis().SetTitleSize(0.05)
Plot1.GetYaxis().SetTitleOffset(0.85)
#Plot1.SetTitle("K_{s}^{0}, LHCb p-p #sqrt{s_{NN}}= 5.02 TeV")
#Plot1.SetTitle("K_{s}^{0}, LHCb p-Pb #sqrt{s_{NN}}= 5.02 TeV")
#Plot1.SetTitle("#Lambda^{0}, LHCb p-p #sqrt{s_{NN}}= 5.02 TeV")
#Plot1.SetTitle("#Lambda^{0}, LHCb p-Pb #sqrt{s_{NN}}= 5.02 TeV")
Plot1.SetTitle("#bar{#Lambda^{0}} pp #sqrt{s_{NN}}= 5.02 TeV")
#Plot1.SetTitle("#bar{#Lambda^{0}}, LHCb p-Pb #sqrt{s_{NN}}= 5.02 TeV")
fFit1 = ROOT.TH1F('tsallis1','Data VS Tsallis', nCHAN - 1, X1 - dx1/2.)
fFit1.SetLineColor(ROOT.kRed)
fFit1.SetLineWidth(2)
fFit1.SetLineStyle(1)
for chan in range(nCHAN):
fFit1.SetBinContent(chan + 1 ,Y_1[chan])
Plot2 = ROOT.TGraphErrors(nChan, x, y2, ex, ey2)
Plot2.SetMarkerStyle(21)
Plot2.SetMarkerColor(ROOT.kViolet-3)
Plot2.SetMarkerSize(1.1)
Plot2.SetLineWidth(3)
fFit2 = ROOT.TH1F('tsallis2','Data VS Tsallis', nCHAN - 1, X2 - dx2/2.)
fFit2.SetLineColor(ROOT.kViolet-3)
fFit2.SetLineWidth(2)
fFit2.SetLineStyle(9)
for chan in range(nCHAN):
fFit2.SetBinContent(chan + 1 ,Y_2[chan])
Plot3 = ROOT.TGraphErrors(nChan, x, y3, ex, ey3)
Plot3.SetMarkerStyle(22)
Plot3.SetMarkerColor(ROOT.kBlue)
Plot3.SetMarkerSize(1.3)
Plot3.SetLineWidth(3)
fFit3 = ROOT.TH1F('tsallis3','Data VS Tsallis', nCHAN - 1, X3 - dx3/2.)
fFit3.SetLineColor(ROOT.kBlue)
fFit3.SetLineWidth(2)
fFit3.SetLineStyle(7)
for chan in range(nCHAN):
fFit3.SetBinContent(chan + 1 ,Y_3[chan])
Plot4 = ROOT.TGraphErrors(nChan, x, y4, ex, ey4)
Plot4.SetMarkerStyle(29)
Plot4.SetMarkerColor(ROOT.kPink+8)
Plot4.SetMarkerSize(1.6)
Plot4.SetLineWidth(3)
fFit4 = ROOT.TH1F('tsallis4','Data VS Tsallis', nCHAN - 1, X4 - dx4/2.)
fFit4.SetLineColor(ROOT.kPink+8)
fFit4.SetLineWidth(2)
fFit4.SetLineStyle(3)
for chan in range(nCHAN):
fFit4.SetBinContent(chan + 1 ,Y_4[chan])
#-------------------------------------------------------------------------------------------------------
Legend = ROOT.TLegend(0.49,0.64,0.87,0.87)
Legend.AddEntry(Plot1,'2.0 < y < 2.5, T_{init}= 0.954 GeV', 'ep')
Legend.AddEntry(Plot2,'2.5 < y < 3.0, T_{init}= 0.803 GeV', 'ep')
Legend.AddEntry(Plot3,'3.0 < y < 3.5, T_{init}= 0.802 GeV', 'ep')
Legend.AddEntry(Plot4,'3.5 < y < 4.0, T_{init}= 0.760 GeV', 'ep')
#Legend.SetFillColor(kWhite)
Legend.SetTextAlign(12)
Legend.SetTextSize(0.03)
Legend.SetTextFont(2)
Legend.SetFillStyle(0)
Legend1 = ROOT.TLegend(0.49,0.64,0.87,0.87)
Legend1.AddEntry(fFit1,' ', 'l')
Legend1.AddEntry(fFit2,' ', 'l')
Legend1.AddEntry(fFit3,' ', 'l')
Legend1.AddEntry(fFit4,' ', 'l')
#-------------------------------------------------------------------------------------------------------
Plot1.Draw("AP")
fFit1.Draw("SAME&l")
fFit2.Draw("SAME&l")
fFit3.Draw("SAME&l")
fFit4.Draw("SAME&l")
Plot2.Draw("SAME&P")
Plot3.Draw("SAME&P")
Plot4.Draw("SAME&P")
Legend1.Draw("SAME")
Legend.Draw("SAME")
ROOT.gPad.SetLogy(1)
ROOT.gPad.SetTicks(1,1)
ROOT.gPad.RedrawAxis()
time.sleep(120)
ROOT.gPad.Update()
| [
"sashkoalexander@gmail.com"
] | sashkoalexander@gmail.com |
ec9ab52536b6ed4f704687ba06d3193573b16c0e | 3e202584920126e5c07732ca2e318952292e6f08 | /pyprotGUI13.py | cba66e7d68b7cb187518e98035e56b3df84f75cd | [] | no_license | shaldr23/pyprot | 56a004d0fbfa34691ac9156682d67b8b09d353bc | 7336ab692c893a581f7a30274ffc6dbf778a2144 | refs/heads/master | 2021-05-04T19:52:58.040068 | 2017-10-23T13:22:10 | 2017-10-23T13:22:10 | 106,822,648 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 16,194 | py | from tkinter import *
from tkinter.filedialog import *
from tkinter.messagebox import *
from pyprotlib.popupmenu import PopupMenu
from pyprotlib.scrolledtext import ScrolledText
from pyprotlib.accessor7 import *
from pyprotlib.guithreads import *
from pyprotlib.multirename import Multirename
import re
import sys
import os.path
############### save-open-close functions #############################################
def on_closing():
    """Window-manager close handler: confirm with the user, then tear down."""
    confirmed = askokcancel("Quit Program", "Do you want to quit?")
    if not confirmed:
        return
    root.destroy()
def saveinfile():
    """Ask for a target path and save every checked table on a worker thread."""
    frameslist=[]
    # Collect (frame, name) pairs for every table whose checkbutton is ticked.
    for i in range(len(tablesdict['var'])):
        if tablesdict['var'][i].get()==1:
            frameslist.append((tablesdict['frame'][i],tablesdict['name'][i]))
    if not frameslist:
        writelog('No table to save.')
    else:
        filename = asksaveasfilename(initialdir='/', title='Save your table',
                                     filetypes = (('text file','*.txt'),
                                                  ('csv file','*.csv'),
                                                  ('excel file','*.xlsx')),
                                     defaultextension=".")
        if not filename:
            writelog("You didn't save any table.")
        else:
            # Heavy I/O runs off the GUI thread to keep the window responsive.
            Threader(writeframes, frameslist, filename)
def openfile():
    """Let the user pick a file and parse it into tables on a worker thread."""
    chosen = askopenfilename()
    if chosen:
        Threader(readframes, chosen)
    else:
        writelog("You didn't open any file.")
##### logical functions for saving and opening tables used in own thread ###############
def writeframes(frames, filename):
    '''
    frames is a list of frames (like frameslist): [(frame1,name1),...]
    filetypes: 'excel', 'csv', 'txt'. txt is tab-separated csv.
    In case of 'excel' all chosen tables are written
    into different sheets. In other cases - into different
    files with table names added to their names.
    Runs on a worker thread; the result message is reported back to the
    GUI thread via guiqueue.
    '''
    message = 'Tables have been saved.'
    wframes = [(Accessor.writeable(frame), name) for (frame, name) in frames]
    root, ext = os.path.splitext(filename)
    try:
        if ext == '.xlsx':
            # One workbook, one sheet per table.
            writer = pd.ExcelWriter(filename)
            for frame, name in wframes:
                frame.to_excel(writer, name, index=False)
            writer.save()
        elif ext in ('.csv', '.txt', ''):
            D = {'.csv': ',', '.txt': '\t', '': '\t'}
            for frame, name in wframes:
                # Several tables go to several files, "<root>(<name>)<ext>".
                appendix = '' if len(wframes) == 1 else '({})'.format(name)
                savename = root + appendix + ext
                frame.to_csv(savename, sep=D[ext], index=False)
        else:
            message = 'Unknown format for saving'
    # BUG FIX: was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only genuine runtime errors should be caught here.
    except Exception:
        message = 'Error occured during saving tables'
    guiqueue.put((writelog, message))
def readframes(filename):
    '''
    opens file on PC and makes dataframe,
    saving into the GUI's table list (via guiqueue -> createcheckbuts).
    Runs on a worker thread.
    '''
    message = 'Tables have been read.'
    # BUG FIX: `frames` was unbound (NameError) when the extension was
    # unknown or when parsing raised — initialise it up front.
    frames = []
    root, ext = os.path.splitext(filename)
    try:
        if ext in ('.xlsx', '.xls'):
            obj = pd.ExcelFile(filename)
            frames = [(obj.parse(sheet), sheet) for sheet in obj.sheet_names]
            frames = [x for x in frames if not x[0].empty]  # remove empty frames
        elif ext in ('.csv', '.txt', ''):
            D = {'.csv': ',', '.txt': '\t', '': '\t'}
            tablename = os.path.split(root)[1]
            frames = [(pd.read_csv(filename, sep=D[ext]), tablename)]
        else:
            message = 'Unknown file format.'
    # BUG FIX: was a bare `except:` — catch only real errors, not
    # SystemExit/KeyboardInterrupt.
    except Exception:
        message = 'Error occured during opening tables.'
        frames = []
    frames = [(Accessor.processable(frame), name) for (frame, name) in frames]
    guiqueue.put((writelog, message))
    guiqueue.put((createcheckbuts, frames))
##############defining variables########################
log=[] #for logging
databases=[('FlyBase','flybase'),
('UniProt','uniprot')]
fields=['gene name',
'molecular function',
'biological process',
'cellular component',
'interactions',
'length',
'mass (kDa)',
'CG* id']
entrytypes=['gene name',
'protein name',
'CG* id',
'uniprot id',
'flybase id',
'auto-determine']
lognum=0 #number of a new log note
tablesdict={i:[] for i in ('name','frame','var','chbut')} #dictionary of lists for existing tables
#and associated elemens.
guiqueue=queue.Queue() #queue to change gui in main thread
workdir=os.path.split(sys.argv[0])[0] #path of working directory
iconpath=os.path.join(workdir,'images/PY-UP2.ico') #path of the icon
####### vars for database-associated fields to restrict inaccessible ones #################
accord=pd.read_csv('pyprotlib/db_columns.txt',sep='\t')
accord=accord[accord['gui'].notnull()]
fbaccessible=accord['gui'][accord['fb_req'].notnull()].values
upaccessible=accord['gui'][accord['up_req'].notnull()].values
######## dictionary for renaming output columns ################################
renamecoldict={}
for colname in ('fb_col','up_col'):
res=accord[accord[colname].notnull()]
D=dict(zip(res[colname].values,res['gui'].values))
renamecoldict.update(D)
####################functions############################
#def __init__(self, DB, protlist, **kargs):
###############################################
def getdata():
    '''
    Function to get access to protein databases.
    Executes another function in another thread.
    '''
    def action():
        # Worker-thread body: queries the selected database and hands the
        # resulting table back to the GUI thread through guiqueue.
        prots=prottext.get(1.0,END).strip()
        if not prots:
            guiqueue.put((writelog,'No query has been entered.'))
        else:
            protlist=re.split('\s+', prots) #get protlist from text
            Acc=Accessor()
            Acc.access(dbvar.get(),protlist,getfields())
            frame=Acc.frame.rename(columns=renamecoldict)
            # In case of uniprot: all entry names get upper case.
            # Fixing it (considering the situation may change):
            if dbvar.get()=='uniprot':
                upentries=[x[0] for x in frame['query'].values]
                protdict={x.upper():x for x in protlist}
                frame['query']=[(protdict[x.upper()],) for x in upentries]
            if keepordervar.get():
                # Restore the order the user typed the entries in.
                frame=Accessor.order(frame,'query',protlist)
            # Make uniform order in columns:
            ordcols=[x for x in ['query']+fields if x in frame.columns]
            frame=frame[ordcols]
            guiqueue.put((createcheckbuts,[(frame,dbvar.get())]))
    def getfields():
        # Translate the ticked GUI field names into the column identifiers
        # the selected backend expects (accord's 'up_req' vs 'fb_req').
        reqcol='up_req' if dbvar.get()=='uniprot' else 'fb_req'
        fieldslist=[fieldsdict['name'][i] for i
                    in range(len(fieldsdict['name']))
                    if fieldsdict['var'][i].get()==1]
        fieldslist=list(accord[reqcol][accord['gui'].isin(fieldslist)])
        return fieldslist
    Threader(action)
def deltables(*args):
    '''
    Function for deletion chosen tables by checkbuttons
    Executed in gui thread
    '''
    names=[]
    L=(len(tablesdict['var']))
    # Iterate from the end so pop() doesn't shift indices still to visit.
    for i in range(L-1,-1,-1): #running from end to escape 'out of range' error
        if tablesdict['var'][i].get()==1:
            tablesdict['chbut'][i].pack_forget()
            names.append(tablesdict['name'][i])
            # Remove the table's entry from every parallel list at once.
            for key in tablesdict.keys():
                tablesdict[key].pop(i)
    if names:
        # names were gathered back-to-front; reverse for display order.
        message='Tables have been deleted: {}.'.format(', '.join(reversed(names)))
    else:
        message='No tables have been chosen for deletion.'
    writelog(message)
def unitetables():
    '''
    Funtion for making union of tables. Implements Accessor.uniteframes.
    '''
    def action():
        # Worker-thread body; GUI changes are funnelled through guiqueue.
        frameslist,nameslist=[],[]
        for frame,name,var in zip(tablesdict['frame'],
                                  tablesdict['name'],tablesdict['var']):
            if var.get():
                frameslist.append(frame)
                nameslist.append(name)
        if len(frameslist)<2:
            message='Unable to unite. At least two tables must be selected'
        else:
            # Frames are merged on the shared 'query' column.
            united=Accessor.uniteframes(frameslist,'query')
            message='Union of tables has been made: {}'.format(', '.join(nameslist))
            guiqueue.put((createcheckbuts, [(united,'united')]))
        guiqueue.put((writelog, message))
    Threader(action)
def termsfreq():
    '''
    Function for making terms frequencies. Implements Accessor.termsfreq.
    '''
    def action():
        # Build a term-frequency table for every ticked table; the new
        # tables get a "_tf" suffix and are registered via the GUI queue.
        selected = [
            [Accessor.termsfreq(frame), name + '_tf']
            for frame, name, var in zip(tablesdict['frame'],
                                        tablesdict['name'],
                                        tablesdict['var'])
            if var.get()
        ]
        guiqueue.put((createcheckbuts, selected))
    Threader(action)
def renametables():
    '''
    Function for renaming tables.
    '''
    def action():
        # Confirm-button callback of the rename dialog.
        newnames=M.getnewnames()
        # Apply the mapping, keeping names that were not changed.
        tablesdict['name']=[newnames.get(x,x) for x in tablesdict['name']]
        for i in range(len(tablesdict['name'])):
            tablesdict['chbut'][i].config(text=tablesdict['name'][i])
        Top.destroy()
        # Log only entries that actually changed.
        diff={x:y for (x,y) in newnames.items() if x!=y}
        if not diff:
            writelog('Tables have not been renamed.')
        else:
            parts=['{} -> {}'.format(x,y) for (x,y) in diff.items()]
            string=', '.join(parts)
            writelog('Tables have been renamed: '+string)
    # Gather the names of all ticked tables.
    nameslist=[]
    for name,var in zip(tablesdict['name'],tablesdict['var']):
        if var.get():
            nameslist.append(name)
    if not nameslist:
        writelog('No tables have been chosen for renaming.')
    else:
        # Secondary window hosting the Multirename widget.
        Top=Toplevel()
        Top.title('Rename')
        M=Multirename(Top,nameslist)
        M.renamebutton.config(command=action)
        M.pack(padx=30,pady=30)
def createcheckbuts(frames):
    '''
    The function creates checkbuttons associated with tables
    and puts them and associated objects (including frames themselves)
    into tablesdict. Gets iterable of kind: [(frame,name),...].
    If names of frames don't exist or already reserved, give them names
    like 'Table_№'.
    Executed in gui thread and can be put into guiqueue.
    '''
    names=[] #for output in writelog
    for frame,name in frames:
        if not name:
            name='Table'
        num=1
        while name in tablesdict['name']: #cycle to create a unique name
            # Strip an existing "(N)" suffix before appending the next one.
            s=re.search('\((\d+)\)$',name)
            if s:
                # NOTE(review): rstrip removes a *character set*, not the
                # literal suffix — names whose tail shares characters with
                # "(N)" may lose extra characters; removesuffix would be safer.
                name=name.rstrip(s.group(0))
            name+='({})'.format(str(num))
            num+=1
        names.append(name)
        var=IntVar()
        chbut=Checkbutton(tablesframe, text=name,variable=var)
        chbut.pack(anchor='w')
        # Keep all four parallel lists in tablesdict in sync.
        for key,obj in (('name', name), ('var',var),
                        ('chbut',chbut),('frame',frame)):
            tablesdict[key].append(obj)
    writelog('New tables have been created: {}.'.format(', '.join(names)))
def writelog(message):
    '''
    Number the message, store it in the log history and display it in the
    read-only log widget, scrolling to the newest entry.
    '''
    global lognum
    lognum += 1
    entry = str(lognum) + '. ' + message
    log.append(entry)
    # The widget is normally DISABLED; unlock it just long enough to write.
    logtext.config(state=NORMAL)
    logtext.insert(END, entry + '\n')
    logtext.config(state=DISABLED)
    logtext.see('end')
def selectallf():
    '''
    Tick every field checkbutton that is currently enabled.
    '''
    for flag, button in zip(fieldsdict['var'], fieldsdict['chbut']):
        if button.cget('state') == 'normal':
            flag.set(1)
def deselectallf():
    '''
    Untick all field checkbuttons (including disabled ones).
    '''
    for flag in fieldsdict['var']:
        flag.set(0)
def dbrestrict():
    '''
    Make some fields disabled dependent on chosen database.
    Is called by dbvar.trace().
    '''
    D={'uniprot':upaccessible,'flybase':fbaccessible}
    accessible=D[dbvar.get()]
    for i in range(len(fieldsdict['name'])):
        # Re-enable everything first, then grey out unsupported fields.
        fieldsdict['chbut'][i].config(state=NORMAL)
        if fieldsdict['name'][i] not in accessible:
            # Unsupported by this database: untick and disable.
            fieldsdict['var'][i].set(0)
            fieldsdict['chbut'][i].config(state=DISABLED)
###########root and menu########################################
root = Tk()
root.title('PyProt')
root.protocol("WM_DELETE_WINDOW", on_closing)
menubar=Menu(root)
filemenu=Menu(menubar,tearoff=0)
filemenu.add_command(label='Open file as table',command=openfile)
filemenu.add_command(label='Save tables as',command=saveinfile)
menubar.add_cascade(label='Options',menu=filemenu)
root.config(menu=menubar)
############ radiobuttons for databases #####################################
dbframe=Frame(root)
dblabel=Label(dbframe,text='Database:', font=('',12))
dbvar=StringVar()
dbvar.set('uniprot')
dbvar.trace('w',lambda *args: dbrestrict())
dbradbuts=[Radiobutton(dbframe, variable=dbvar, text=x, value=y, font=('',12))
for (x,y) in databases]
############ radiobuttons to choose entry type #########################################
entrytypesframe=Frame(root)
entvar=StringVar()
entradbuts=[Radiobutton(entrytypesframe, variable=entvar, text=x, value=x, font=('',15))
for x in entrytypes]
############ checkbuttons for fields #####################################################
fieldsframe=Frame(root) #frame for checkbuttons
fieldslabel=Label(fieldsframe,text='Fields:', font=('',13))
fieldsdict={} #creating dictionary of fields and
#associated checkbuttons and IntVars
fieldsdict['name']=fields
fieldsdict['var']=[IntVar() for x in fields]
fieldsdict['chbut']=[Checkbutton(fieldsframe, text=fieldsdict['name'][i],
variable=fieldsdict['var'][i],font=('',10))
for i in range(len(fields))]
selectall=Button(fieldsframe,text='Select all',font=('',10,'italic'),command=selectallf)
deselectall=Button(fieldsframe,text='Deselect all',font=('',10,'italic'),command=deselectallf)
############## frame and label for tables ##########################################
tablesframe=Frame(root,height=300,width=200,highlightbackground='black',
highlightthickness=2) #frame for checkbuttons of created tables
tableslabel=Label(tablesframe, text='Created tables:',font=('',12,'bold'))
################ text widget for entries and button to yield data ##########################
prottextframe=Frame(root)
protlabel=Label(prottextframe, text='Your query:',font=('',12,'bold'))
prottext=ScrolledText(prottextframe, width=15, height=10) #to paste entries here
PopupMenu(prottext)
getdatabutton=Button(prottextframe,text='Get data',command=getdata,font=('',12,'bold'))
keepordervar=IntVar()
keeporder=Checkbutton(prottextframe,text='Keep query order',variable=keepordervar)
keepordervar.set(1)
################ widgets for logging #######################################################
logtextframe=Frame(root)
loglabel=Label(logtextframe,text='Log:',font=('',12))
logtext=ScrolledText(logtextframe, width=40, height=20) #to display log. DISABLED state.
logtext.config(state=DISABLED)
###### buttons to control tables #############################
butsframe=Frame(root)
buttuple=(('Delete tables',deltables),('Unite tables',unitetables),
('Get terms freq.', termsfreq),('Rename tables',renametables))
buttonsdict={elem[0]:Button(butsframe,text=elem[0], command=elem[1],font=('',12))
for elem in buttuple}
###### packing #################################
prottextframe.pack(side=LEFT,anchor='nw')
protlabel.pack()
prottext.pack()
keeporder.pack()
getdatabutton.pack()
dbframe.pack(side=LEFT,anchor='n')
dblabel.pack(anchor='w')
for i in dbradbuts:
i.pack(anchor='w')
fieldsframe.pack(side=LEFT,anchor='n')
fieldslabel.pack(anchor='w')
for f in fieldsdict['chbut']:
f.pack(anchor='w')
selectall.pack(anchor='w')
deselectall.pack(anchor='w')
butsframe.pack(side=LEFT,anchor='n')
for i in sorted(buttonsdict.keys()):
buttonsdict[i].pack(anchor='w',fill=X)
tablesframe.pack(side=LEFT,anchor='nw')
tablesframe.pack_propagate(False)
tableslabel.pack()
logtextframe.pack(side=LEFT)
loglabel.pack()
logtext.pack()
############### starting directives ###############################################
dbrestrict()
root.after(0,execqueue,root, guiqueue)
root.mainloop()
| [
"32765891+shaldr23@users.noreply.github.com"
] | 32765891+shaldr23@users.noreply.github.com |
89f106251db7d0df0d926aea96cdaba39a649659 | 224739e40778b1350205ad83114274e97e3ed9f4 | /consts.py | 5542026b1f3386ab71beb1c74a84e4d448eb84d0 | [] | no_license | miladsaber/ssh-pot | 6a2c4214776167a2186c1da1470ff7826758c467 | 616b2fd694816d4041833e4292d22977fec539f6 | refs/heads/master | 2022-11-14T07:47:08.765998 | 2020-06-30T13:00:45 | 2020-06-30T13:00:45 | 276,097,484 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 123 | py | ssh_versions = {
"ubuntu":{
"version":'OpenSSH_6.6.1p1',
"comment":'Ubuntu-2ubuntu2.3',
"protocolVersion":"2.0"
}
} | [
"miladsaber@ymail.com"
] | miladsaber@ymail.com |
fb817ccafa23f64c31e1ef915ddd6a2539541bf8 | 267844c78976fb1e61e35121fe799cd6d1c3ce89 | /game.py | bb1215292bca68893b854e2fc324a15ac4ba7b71 | [] | no_license | tnals123/My-Pokemon-Game | e3dc82c914b0cfc9acc3cb0c9e5446029b8a5333 | 891053b8f5bc7490929d6d1496fe3544988cdf25 | refs/heads/master | 2023-07-22T13:24:54.501923 | 2021-08-18T12:06:42 | 2021-08-18T12:06:42 | 397,584,422 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 32,246 | py |
import os
import thread4
import threading
import time
import thread
import thread5
import thread2
class Print(threading.Thread):
def __init__(self):
pass
threading.Thread.__init__(self)
self.hp=thread.HP(200,5000,1,0,40,80,200)
self.hp.start()
self.hp1=thread5.Game()
self.hp1.start()
self.at=thread2.Enemy()
self.at.start()
self.mon=thread4.Money(5000)
    def run(self):
        # Thread entry point: hand control to the main battle-screen loop.
        self.fightscreen()
def store2(self):
while True:
print('''
''')
self.mon.nowmoney()
print('''
===================================★ ☆ ★ 상점 입니다. ★ ☆ ★================================================
1: 낫- 기본 데미지를 6 늘려줍니다. 2: 애니비아-스킬 데미지를 늘려줍니다.
(가격: 450골드) (가격: 1000골드)
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢤⣶⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⣿⣿⣏⢴⢏⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡏⢹⣿⣀⣀⣀⣀⣀⣤⣤
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⡾⠿⢿⡀⠀⠀⠀⠀⣠⣶⣿⣷⠀⠀⠀⠀ ⣿⣿⣿⣿⣿⣿⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⠜⠻⠿⠿⠿⠿⠿⠿⠿
⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⣦⣴⣿⡋⠀⠀⠈⢳⡄⠀⢠⣾⣿⠁⠈⣿⡆⠀⠀⠀ ⣿⣿⠿⣿⣿⣷⣞⣧⣿⣿⣿⣿⣿⣿⣿⣿⡿⠟⠋⠉⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⠿⠛⠉⠉⠁⠀⠀⠀⠹⡄⣿⣿⣿⠀⠀⢹⡇⠀⠀⠀ ⣿⣧⠾⣺⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣫⣤⣴⣦⣶⣾⣿⡦⠤⠀⠀⠤⠤⠤⠖⠒
⠀⠀⠀⠀⠀⣠⣾⡿⠋⠁⠀⠀⠀⠀⠀⠀⠀⠀⣰⣏⢻⣿⣿⡆⠀⠸⣿⠀⠀⠀ ⣿⣿⣿⣿⣿⡿⢋⡻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⢉⡀⣀⠀⢠⠃⠀⠀⠀
⠀⠀⠀⢀⣴⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣾⣿⣿⣆⠹⣿⣷⠀⢘⣿⠀⠀⠀ ⣿⣿⣿⣿⣿⣿⣮⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠀⡃⠀⠀⡇⡜⠉⠙⠛⠓⠒
⠀⠀⢀⡾⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⣿⣿⠋⠉⠛⠂⠹⠿⣲⣿⣿⣧⠀⠀ ⣿⠛⣹⢻⣿⣿⣿⣿⣿⣿⣿⡿⠿⠿⠛⠋⠉⠉⠐⠈⠙⢲⡊⡹⠳⣤⣤⡀⠀⠀
⠀⢠⠏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣿⣿⣿⣷⣾⣿⡇⢀⠀⣼⣿⣿⣿⣧⠀ ⣿⣷⣷⣿⣿⣿⣿⡿⠛⠋⠁⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡵⣁⣀⡀⠀⠉⠉⠙
⠰⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⡘⢿⣿⣿⣿⠀ ⣿⣿⣿⣿⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⢀⣀⠠⠀⠀⠀⠐⠀⣇⠀⠀⠉⠓⠲⢤⡀
⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⣷⡈⠿⢿⣿⡆ ⣿⣿⣿⣿⡟⠀⣀⣠⣤⣶⣾⣿⣿⣿⣿⣿⣶⣶⣦⣤⣤⣬⡀⠙⢲⡄⠀⠀⠀⠈
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⠛⠁⢙⠛⣿⣿⣿⣿⡟⠀⡿⠀⠀⢀⣿⡇ ⣿⠟⣻⢿⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡆⠀⠙⠢⣄⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣶⣤⣉⣛⠻⠇⢠⣿⣾⣿⡄⢻⡇ ⣿⣮⣯⣾⣿⣿⡟⣩⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠳⡄⠀⠀⠘⣦⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣦⣤⣾⣿⣿⣿⣿⣆⠁ ⣿⣿⣿⣿⣿⣿⣿⣿⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠋⠀⠙⢲⠀⠀⠈⢳
⣿⣿⣿⣿⣿⡿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠛⣆⠀⠀⠈⠳⣄⠀⠀
⠀⠀⠀⠀ ⣿⣿⣿⣿⣿⣔⣻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠁⠀⠙⡆⠀⠀⠀⠘⠷⢄
구입:13 구입:14
3: 상점 1페이지로
6: 상점 나가기
''' )
time.sleep(0.25)
os.system('cls')
if self.hp1.choice=='13':
if self.mon.money>=450:
self.mon.money-=450
self.hp.attack1+=6
self.fightscreen()
if self.hp1.choice=='14':
if self.mon.money>=1000:
self.mon.money-=1000
self.hp.hddm+=40
self.hp.wtdm+=80
self.hp.firedm+=100
self.fightscreen()
if self.hp1.choice=='3':
self.store()
if self.hp1.choice=='6':
self.fightscreen()
if self.hp.myhealth<=0:
print('패배')
break
if self.hp.enemyhealth<=0:
print('게임 클리어!')
break
print('''
''')
self.mon.nowmoney()
print('''
===================================☆ ★ ☆ 상점 입니다. ☆ ★ ☆================================================
1: 낫- 기본 데미지를 6 늘려줍니다. 2: 애니비아-스킬 데미지를 늘려줍니다.
(가격: 450골드) (가격: 1000골드)
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢤⣶⣄⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀ ⣿⣿⣏⢴⢏⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡏⢹⣿⣀⣀⣀⣀⣀⣤⣤
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣀⣤⡾⠿⢿⡀⠀⠀⠀⠀⣠⣶⣿⣷⠀⠀⠀⠀ ⣿⣿⣿⣿⣿⣿⠿⢿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⠜⠻⠿⠿⠿⠿⠿⠿⠿
⠀⠀⠀⠀⠀⠀⠀⠀⢀⣴⣦⣴⣿⡋⠀⠀⠈⢳⡄⠀⢠⣾⣿⠁⠈⣿⡆⠀⠀⠀ ⣿⣿⠿⣿⣿⣷⣞⣧⣿⣿⣿⣿⣿⣿⣿⣿⡿⠟⠋⠉⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⣰⣿⣿⠿⠛⠉⠉⠁⠀⠀⠀⠹⡄⣿⣿⣿⠀⠀⢹⡇⠀⠀⠀ ⣿⣧⠾⣺⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣫⣤⣴⣦⣶⣾⣿⡦⠤⠀⠀⠤⠤⠤⠖⠒
⠀⠀⠀⠀⠀⣠⣾⡿⠋⠁⠀⠀⠀⠀⠀⠀⠀⠀⣰⣏⢻⣿⣿⡆⠀⠸⣿⠀⠀⠀ ⣿⣿⣿⣿⣿⡿⢋⡻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⢉⡀⣀⠀⢠⠃⠀⠀⠀
⠀⠀⠀⢀⣴⠟⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣾⣿⣿⣆⠹⣿⣷⠀⢘⣿⠀⠀⠀ ⣿⣿⣿⣿⣿⣿⣮⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠟⠀⡃⠀⠀⡇⡜⠉⠙⠛⠓⠒
⠀⠀⢀⡾⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⣿⣿⠋⠉⠛⠂⠹⠿⣲⣿⣿⣧⠀⠀ ⣿⠛⣹⢻⣿⣿⣿⣿⣿⣿⣿⡿⠿⠿⠛⠋⠉⠉⠐⠈⠙⢲⡊⡹⠳⣤⣤⡀⠀⠀
⠀⢠⠏⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⣤⣿⣿⣿⣷⣾⣿⡇⢀⠀⣼⣿⣿⣿⣧⠀ ⣿⣷⣷⣿⣿⣿⣿⡿⠛⠋⠁⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡵⣁⣀⡀⠀⠉⠉⠙
⠰⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢠⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⡘⢿⣿⣿⣿⠀ ⣿⣿⣿⣿⣿⡿⠋⠀⠀⠀⠀⠀⠀⠀⢀⣀⠠⠀⠀⠀⠐⠀⣇⠀⠀⠉⠓⠲⢤⡀
⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠸⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠀⣷⡈⠿⢿⣿⡆ ⣿⣿⣿⣿⡟⠀⣀⣠⣤⣶⣾⣿⣿⣿⣿⣿⣶⣶⣦⣤⣤⣬⡀⠙⢲⡄⠀⠀⠀⠈
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⠛⠁⢙⠛⣿⣿⣿⣿⡟⠀⡿⠀⠀⢀⣿⡇ ⣿⠟⣻⢿⣶⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡆⠀⠙⠢⣄⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠘⣶⣤⣉⣛⠻⠇⢠⣿⣾⣿⡄⢻⡇ ⣿⣮⣯⣾⣿⣿⡟⣩⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠳⡄⠀⠀⠘⣦⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣿⣿⣿⣿⣦⣤⣾⣿⣿⣿⣿⣆⠁ ⣿⣿⣿⣿⣿⣿⣿⣿⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠋⠀⠙⢲⠀⠀⠈⢳
⣿⣿⣿⣿⣿⡿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⡿⠛⣆⠀⠀⠈⠳⣄⠀⠀
⠀⠀⠀⠀ ⣿⣿⣿⣿⣿⣔⣻⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⠁⠀⠙⡆⠀⠀⠀⠘⠷⢄
구입:13 구입:14
3: 상점 1페이지로
6: 상점 나가기
''' )
time.sleep(0.25)
os.system('cls')
if self.hp1.choice=='13':
if self.mon.money>=450:
self.mon.money-=450
self.hp.attack1+=6
self.fightscreen()
if self.hp1.choice=='14':
if self.mon.money>=1000:
self.mon.money-=1000
self.hp.hddm+=40
self.hp.wtdm+=80
self.hp.firedm+=100
self.fightscreen()
if self.hp1.choice=='3':
self.store()
if self.hp1.choice=='6':
self.fightscreen()
if self.hp.myhealth<=0:
print('패배')
break
if self.hp.enemyhealth<=0:
print('게임 클리어!')
break
    def store(self):
        """Interactive loop for shop page 1 (damage-reduction cloak / health potion).

        Reads the player's menu choice from ``self.hp1.choice`` (updated
        elsewhere -- presumably by a concurrent input reader; TODO confirm)
        and applies purchases to ``self.hp`` / ``self.mon``.  The loop only
        breaks on player death or enemy death; menu options instead hand
        control to other screens (``store2`` / ``fightscreen``).
        """
        while True:
            # Spacer + current gold, then draw the page-1 storefront.
            print('''
    ''')
            self.mon.nowmoney()
            print('''
 ===================================★ ☆ ★ 상점 입니다. ★ ☆ ★================================================
    1:음전자의 망토- 입는 피해를 3 감소시킵니다.                                 2:체력 물약-체력을 회복합니다.
            (가격: 800골드)                                                      (가격:50골드)
⠄⠄⠄⠄⠄⠄⠄⠄⢀⣀⣤⣴⣶⠞⠛⢶⣤⣄⡀⠄⠄⠄⠄⠄⠄⠄⠄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⠄⠄⣠⡶⠿⠿⠿⠿⠟⠁⣰⠇⣈⠻⣿⣿⣷⣶⣤⣀⠄⠄⠄⠄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⠤⠄⠒⠒⠈⠉⠉⠉⠉⠐⠒⠒⠒⠂⠤⣤⡄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⢠⣾⣿⡗⢿⣶⣶⣤⣴⣾⠟⢠⡏⠄⠄⠈⠙⠿⣿⣿⣷⣦⠄⠄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢻⠒⠤⢀⣀⣀⡀⠄⠄⠠⠠⠤⠤⠤⠒⠊⡸⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⢠⣿⣿⣿⠇⢶⣤⣀⡺⠿⠋⣠⡾⠄⠄⢤⣶⣤⣄⠈⠛⢿⣿⣷⡄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠸⡀⠄⠄⢀⠄⠂⠄⠄⠄⠄⠄⠄⠄⠄⡰⠁⠄⠄⠄⠄⠄⠄⠄⠄
⠄⢀⣿⣿⣿⣣⣿⣷⣌⡛⢿⣿⣾⡟⠁⢤⣤⣀⠙⢿⣿⣷⣄⠄⠙⢿⣷⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠻⡼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⠃⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⣼⣿⣿⣳⣿⣿⣿⣿⣿⣷⣦⢭⣶⣇⠄⠻⣿⣧⡀⠙⢿⣿⣷⣦⡀⠙⠇          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢹⣫⣙⣛⣛⣛⣿⣟⣻⡻⠃⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⢰⣿⣿⣳⣿⣿⣻⣿⢿⣿⣿⣿⣿⣿⣿⣷⡀⠹⣿⣿⣄⠄⠹⣿⣿⣴⡄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣿⡟⣯⣽⣾⣿⣟⣻⠁⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⢸⡿⣱⣿⣿⣏⣿⣿⢸⣿⣿⣧⣿⣿⣿⣿⣷⡀⠘⣿⣿⣦⠄⠈⢿⡿⣱⣿          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣨⣿⣿⣿⣿⣗⣹⡇⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠘⣵⣿⣿⣿⣸⣿⣿⢾⣿⣿⣿⢸⣿⣿⣿⣿⣷⠄⡜⣿⣿⣷⠄⠄⠁⣿⡿          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣀⡼⠋⠈⠉⠉⠉⠉⠉⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⢸⣶⣍⢿⢧⣿⣿⣿⢸⣿⣿⣿⢸⣿⣿⣿⣿⣿⣇⠘⡜⣿⣷⣴⣦⣀⠘⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⡠⠞⠉⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠻⣿⢇⣾⣿⣿⣿⢸⣿⣿⣿⡯⣿⣿⣿⣿⣿⣿⡆⠘⡽⡟⢫⣴⣶⡆⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⣠⠶⠋⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠙⢷⣿⡭⠡⠆⢸⣿⣿⣿⡇⠿⣿⣿⣿⣿⠛⠻⠄⢫⠄⣀⡹⣿⡇⠄          ⠄⠄⠄⠄⠄⠄⠄⢀⣠⠴⠛⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⠄⠙⠃⠄⢀⣚⣭⣭⣭⡍⠄⣿⣿⣿⡿⢟⣛⣂⠄⣼⡿⣣⡟⠄⠄          ⢀⣀⣀⡤⠤⠖⠛⠉⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⠄⠄⠄⠉⠙⠻⣿⣿⣿⣁⣀⣈⣩⣭⣶⣿⣿⣿⣷⣭⡶⠋
                     구입:11                                                              구입:12
                                         5: 상점 2 페이지
                                         6: 상점 나가기
    ''' )
            # Brief pause, then clear the console ('cls' is Windows-only).
            time.sleep(0.25)
            os.system('cls')
            # '11': negatron cloak -- costs 800 gold, flags damage reduction.
            if self.hp1.choice=='11':
                if self.mon.money>=800:
                    self.mon.money-=800
                    self.hp.depense=1
                    self.fightscreen()
            # '12': health potion -- costs 50 gold, restores player HP to 200.
            if self.hp1.choice=='12':
                if self.mon.money>=50:
                    self.mon.money-=50
                    self.hp.myhealth=200
                    self.fightscreen()
            # '5': go to shop page 2; '6': leave the shop (back to battle).
            if self.hp1.choice=='5':
                self.store2()
            if self.hp1.choice=='6':
                self.fightscreen()
            # Exit conditions: player defeat or enemy defeat ends the loop.
            if self.hp.myhealth<=0:
                print('패배')
                break
            if self.hp.enemyhealth<=0:
                print('게임 클리어!')
                break
            # NOTE(review): everything below is a near-verbatim duplicate of the
            # sequence above (banner decorations and the defeat message differ),
            # so each loop iteration renders the shop and processes the queued
            # choice twice.
            print('''
    ''')
            self.mon.nowmoney()
            print('''
 ===================================☆ ★ ☆ 상점 입니다. ☆ ★ ☆================================================
    1:음전자의 망토- 입는 피해를 3 감소시킵니다.                                 2:체력 물약-체력을 회복합니다.
            (가격: 800골드)                                                      (가격:50골드)
⠄⠄⠄⠄⠄⠄⠄⠄⢀⣀⣤⣴⣶⠞⠛⢶⣤⣄⡀⠄⠄⠄⠄⠄⠄⠄⠄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⠄⠄⣠⡶⠿⠿⠿⠿⠟⠁⣰⠇⣈⠻⣿⣿⣷⣶⣤⣀⠄⠄⠄⠄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⠤⠄⠒⠒⠈⠉⠉⠉⠉⠐⠒⠒⠒⠂⠤⣤⡄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⢠⣾⣿⡗⢿⣶⣶⣤⣴⣾⠟⢠⡏⠄⠄⠈⠙⠿⣿⣿⣷⣦⠄⠄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢻⠒⠤⢀⣀⣀⡀⠄⠄⠠⠠⠤⠤⠤⠒⠊⡸⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⢠⣿⣿⣿⠇⢶⣤⣀⡺⠿⠋⣠⡾⠄⠄⢤⣶⣤⣄⠈⠛⢿⣿⣷⡄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠸⡀⠄⠄⢀⠄⠂⠄⠄⠄⠄⠄⠄⠄⠄⡰⠁⠄⠄⠄⠄⠄⠄⠄⠄
⠄⢀⣿⣿⣿⣣⣿⣷⣌⡛⢿⣿⣾⡟⠁⢤⣤⣀⠙⢿⣿⣷⣄⠄⠙⢿⣷⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠻⡼⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⠃⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⣼⣿⣿⣳⣿⣿⣿⣿⣿⣷⣦⢭⣶⣇⠄⠻⣿⣧⡀⠙⢿⣿⣷⣦⡀⠙⠇          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢹⣫⣙⣛⣛⣛⣿⣟⣻⡻⠃⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⢰⣿⣿⣳⣿⣿⣻⣿⢿⣿⣿⣿⣿⣿⣿⣷⡀⠹⣿⣿⣄⠄⠹⣿⣿⣴⡄⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣿⡟⣯⣽⣾⣿⣟⣻⠁⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⢸⡿⣱⣿⣿⣏⣿⣿⢸⣿⣿⣧⣿⣿⣿⣿⣷⡀⠘⣿⣿⣦⠄⠈⢿⡿⣱⣿          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣨⣿⣿⣿⣿⣗⣹⡇⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠘⣵⣿⣿⣿⣸⣿⣿⢾⣿⣿⣿⢸⣿⣿⣿⣿⣷⠄⡜⣿⣿⣷⠄⠄⠁⣿⡿          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⣀⡼⠋⠈⠉⠉⠉⠉⠉⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⢸⣶⣍⢿⢧⣿⣿⣿⢸⣿⣿⣿⢸⣿⣿⣿⣿⣿⣇⠘⡜⣿⣷⣴⣦⣀⠘⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⡠⠞⠉⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠻⣿⢇⣾⣿⣿⣿⢸⣿⣿⣿⡯⣿⣿⣿⣿⣿⣿⡆⠘⡽⡟⢫⣴⣶⡆⠄          ⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⢀⣠⠶⠋⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠙⢷⣿⡭⠡⠆⢸⣿⣿⣿⡇⠿⣿⣿⣿⣿⠛⠻⠄⢫⠄⣀⡹⣿⡇⠄          ⠄⠄⠄⠄⠄⠄⠄⢀⣠⠴⠛⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⠄⠙⠃⠄⢀⣚⣭⣭⣭⡍⠄⣿⣿⣿⡿⢟⣛⣂⠄⣼⡿⣣⡟⠄⠄          ⢀⣀⣀⡤⠤⠖⠛⠉⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄⠄
⠄⠄⠄⠄⠄⠄⠉⠙⠻⣿⣿⣿⣁⣀⣈⣩⣭⣶⣿⣿⣿⣷⣭⡶⠋
                     구입:11                                                              구입:12
                                         5: 상점 2 페이지
                                         6: 상점 나가기
    ''' )
            time.sleep(0.25)
            os.system('cls')
            if self.hp1.choice=='11':
                if self.mon.money>=800:
                    self.mon.money-=800
                    self.hp.depense=1
                    self.fightscreen()
            if self.hp1.choice=='12':
                if self.mon.money>=50:
                    self.mon.money-=50
                    self.hp.myhealth=200
                    self.fightscreen()
            if self.hp1.choice=='5':
                self.store2()
            if self.hp1.choice=='6':
                self.fightscreen()
            if self.hp.myhealth<=0:
                print('당신은 죽었습니다. 게임 패배')
                break
            if self.hp.enemyhealth<=0:
                print('게임 클리어!')
                break
    def fightscreen(self):
        """Main battle-screen loop.

        Each iteration draws gold, enemy HP and the battle art, then applies
        the player's queued choice from ``self.hp1.choice``:
        '1' headbutt, '2' water gun, '3' flamethrower, '4' open the shop.
        Two nearly identical frames are rendered per iteration, differing only
        in the creature's face line -- apparently a two-frame animation.
        The loop breaks on victory (enemy HP <= 0) or defeat (player HP <= 0).
        """
        while True:
            # Frame 1: spacer, money, enemy HP bar, battle art + menu.
            print('''⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ''')
            self.mon.nowmoney()
            self.hp.printhp()
            print('''            ⡠⠤⠀⠒⠒⠂⠐⠒⢢⡦⠆⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡤⠊⠀⠀⠀⠀⠀⠀⠀⠀⠋⢰⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡰⢑⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡆⠠⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡆⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢇⠈⠂⠀⠀⠀⠀⠀⠀⠀⢀⠀⡺⠁⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⡠⠲⠛⠈⠙⠹⠳⡆⠂⠀⠀⠂⢒⡩⠓⠈⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⢰⠁⢀⡀⢀⣤⣄⡀⢈⡗⠒⠬⠙⠧⠴⠋⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⣣⣡⡭⠉⠀⡸⡇⡀⠀⣱⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀ ⣿⠣⠌⠁⠈⠄⠌⣷⠠⠅⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
  ⠀⠀⠀⠀⠀⠙⢢⡤⡂⡠⠤⣰⡁⡓⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⡔⢓⢄⢖⢙⡿⢀⡾⣸⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⢰⡫⢑⢥⠖⢣⠗⠁⢠⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠰⣤⣮⣥⠤⣼⣷⠖⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠿⠷⢿⠟⠷⣝⢏⣽⡶⠶⠶⠶⠶⠶⠶⠶⢶⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⢸⠑⢠⠣⠦⣽⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢾⢽⢶⣟⡅⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⢀⡤⢾⢼⢺⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
░░░░░░░░░░░░▄▀▀▀▀▄░░░    █
░░░░░░░░░░▄▀░░!░!░█░░    █
░▄▄░░░░░▄▀░░░░▄▄▄▄█░░    █
█░░▀▄░▄▀░░░░░░░░░░████████     ⠀⠀⠀⠀⠀⠀
░▀▄░░▀▄░░░░█░░░░░░█░░                        1:몸통 박치기(MP:40)                 2:물대포(MP:60)
░░░▀▄░░▀░░░█░░░░░░█░░                        3:화염방사(MP:100)                   4:상점
░░░▄▀░░░░░░█░░░░▄▀░░░
░░░▀▄▀▄▄▀░░█▀░▄▀░░░░░
░░░░░░░░█▀▀█▀▀░░░░░░░
░░░░░░░░▀▀░▀▀░░░░░░░░⠀⠀⠀⠀''')
            self.hp.printmyhp()
            self.hp.manaupdate()
            print('''
  ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀''')
            time.sleep(0.25)
            # Apply the queued choice; attack choices are consumed by
            # resetting them to '0' so they only fire once.
            if self.hp1.choice=='1':
                self.hp.headbutt()
                self.hp1.choice='0'
            if self.hp1.choice=='2':
                self.hp.watergun()
                self.hp1.choice='0'
            if self.hp1.choice=='3':
                self.hp.fire()
                self.hp1.choice='0'
                # NOTE(review): in this first frame the screen is cleared only
                # after a fire attack; the second frame below clears
                # unconditionally.  Confirm whether this asymmetry is intended.
                os.system('cls')
            if self.hp1.choice=='4':
                self.store()
            # End conditions: enemy defeated (win) or player defeated (loss).
            if self.hp.enemyhealth<=0:
                print('게임 클리어!')
                break
            if self.hp.myhealth<=0:
                print('패배')
                break
            # Frame 2: identical layout with a different face line, then the
            # same choice handling as above.
            print('''⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
    ''')
            self.mon.nowmoney()
            self.hp.printhp()
            print('''            ⡠⠤⠀⠒⠒⠂⠐⠒⢢⡦⠆⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢀⡤⠊⠀⠀⠀⠀⠀⠀⠀⠀⠋⢰⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡰⢑⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢰⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡆⠠⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⡆⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⢇⠈⠂⠀⠀⠀⠀⠀⠀⠀⢀⠀⡺⠁⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⡠⠲⠛⠈⠙⠹⠳⡆⠂⠀⠀⠂⢒⡩⠓⠈⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⢰⠁⢀⡀⢀⣤⣄⡀⢈⡗⠒⠬⠙⠧⠴⠋⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⣣⣡⡭⠉⠀⡸⡇⡀⠀⣱⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀ ⣿⠣⠌⠁⠈⠄⠌⣷⠠⠅⡆⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠙⢢⡤⡂⡠⠤⣰⡁⡓⢀⡀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⡔⢓⢄⢖⢙⡿⢀⡾⣸⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⢰⡫⢑⢥⠖⢣⠗⠁⢠⠃⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠰⣤⣮⣥⠤⣼⣷⠖⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠿⠷⢿⠟⠷⣝⢏⣽⡶⠶⠶⠶⠶⠶⠶⠶⢶⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⢸⠑⢠⠣⠦⣽⡇⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⠀⠙⢾⢽⢶⣟⡅⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
⠀⠀⠀⠀⠀⠀⠀⠀⢀⡤⢾⢼⢺⠉⠁⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀
░░░░░░░░░░░░▄▀▀▀▀▄░░░
░░░░░░░░░░▄▀░░▄░▄░█░░
░▄▄░░░░░▄▀░░░░▄▄▄▄█░░
█░░▀▄░▄▀░░░░░░░░░░█░░      ⠀⠀⠀⠀⠀⠀
░▀▄░░▀▄░░░░█░░░░░░█░░                        1:몸통 박치기(MP:40)                 2:물대포(MP:60)
░░░▀▄░░▀░░░█░░░░░░█░░                        3:화염방사(MP:100)                   4:상점
░░░▄▀░░░░░░█░░░░▄▀░░░
░░░▀▄▀▄▄▀░░█▀░▄▀░░░░░
░░░░░░░░█▀▀█▀▀░░░░░░░
░░░░░░░░▀▀░▀▀░░░░░░░░⠀⠀⠀⠀''')
            self.hp.printmyhp()
            self.hp.manaupdate()
            print('''
  ⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀''')
            time.sleep(0.25)
            os.system('cls')
            if self.hp1.choice=='1':
                self.hp.headbutt()
                self.hp1.choice='0'
            if self.hp1.choice=='2':
                self.hp.watergun()
                self.hp1.choice='0'
            if self.hp1.choice=='3':
                self.hp.fire()
                self.hp1.choice='0'
            if self.hp1.choice=='4':
                self.store()
            if self.hp.enemyhealth<=0:
                print('게임 클리어!')
                break
            if self.hp.myhealth<=0:
                print('패배')
                break
if __name__ == "__main__":
    # Entry point: build the top-level game object and hand over control.
    game = Print()
    game.start()
| [
"soo8608@naver.com"
] | soo8608@naver.com |
5d1f082d6d49219104ea342bd2d205351bf8267c | a4a01e251b194f6d3c6654a2947a33fec2c03e80 | /PythonWeb/Django/1809/Day02/DjangoDemo01/sport/apps.py | 8dc5fb5c302ddd038a2fa5369b1ba78cfd405151 | [] | no_license | demo112/1809 | 033019043e2e95ebc637b40eaf11c76bfd089626 | e22972229e5e7831dce2aae0b53ce19a6e3bb106 | refs/heads/master | 2020-04-09T07:10:49.906231 | 2019-02-27T13:08:45 | 2019-02-27T13:08:45 | 160,143,869 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 85 | py | from django.apps import AppConfig
class SprotConfig(AppConfig):
    """Django AppConfig for the ``sport`` application.

    NOTE(review): the class name reads "Sprot" (likely a typo for "Sport").
    Renaming is avoided here because external references (e.g. an
    INSTALLED_APPS entry such as ``'sport.apps.SprotConfig'``) would break.
    """
    # Dotted label Django uses to register this app.
    name = 'sport'
| [
"huafengdongji@hotmail.com"
] | huafengdongji@hotmail.com |
28bfd3327dfe1e2148f26cf770139073cb414e6b | 1a8763a8e6140e42e1001d47c1b1701169ea28a3 | /src/server/portal/telnet.py | d0a074c7eb0e0b9334d56193ba208a22d0a60c29 | [
"BSD-3-Clause",
"Artistic-1.0"
] | permissive | google-code-export/evennia | 09626b60e0f8fb15b5c56ff6385d7b89428e6600 | f458265bec64909d682736da6a118efa21b42dfc | refs/heads/master | 2016-09-06T16:35:28.146332 | 2014-01-26T11:41:59 | 2014-01-26T11:41:59 | 32,133,110 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 7,871 | py | """
This module implements the telnet protocol.
This depends on a generic session module that implements
the actual login procedure of the game, tracks
sessions etc.
"""
import re
from twisted.conch.telnet import Telnet, StatefulTelnetProtocol, IAC, LINEMODE
from src.server.session import Session
from src.server.portal import ttype, mssp, msdp
from src.server.portal.mccp import Mccp, mccp_compress, MCCP
from src.utils import utils, ansi, logger
_RE_N = re.compile(r"\{n$")
class TelnetProtocol(Telnet, StatefulTelnetProtocol, Session):
    """
    Each player connecting over telnet (ie using most traditional mud
    clients) gets a telnet protocol instance assigned to them. All
    communication between game and player goes through here.
    """

    def connectionMade(self):
        """
        This is called when the connection is first
        established.
        """
        # initialize the session
        # iaw_mode is True while the previous read ended mid-IAC (telnet
        # command) sequence -- see dataReceived() below.
        self.iaw_mode = False
        client_address = self.transport.client
        self.init_session("telnet", client_address, self.factory.sessionhandler)
        # negotiate mccp (data compression)
        self.mccp = Mccp(self)
        # negotiate ttype (client info)
        self.ttype = ttype.Ttype(self)
        # negotiate mssp (crawler communication)
        self.mssp = mssp.Mssp(self)
        # msdp
        self.msdp = msdp.Msdp(self)
        # add this new connection to sessionhandler so
        # the Server becomes aware of it.
        # This is a fix to make sure the connection does not
        # continue until the handshakes are done. This is a
        # dumb delay of 1 second. This solution is not ideal (and
        # potentially buggy for slow connections?) but
        # adding a callback chain to all protocols (and notably
        # to their handshakes, which in some cases are multi-part)
        # is not trivial. Without it, the protocol will default
        # to their defaults since sessionhandler.connect will sync
        # before the handshakes have had time to finish. Keeping this patch
        # until coming up with a more elegant solution /Griatch
        from src.utils.utils import delay
        delay(1, self, self.sessionhandler.connect)
        #self.sessionhandler.connect(self)

    def enableRemote(self, option):
        """
        This sets up the remote-activated options we allow for this protocol.
        """
        # Allowed remote options: line mode, terminal type, compression, MSSP.
        return (option == LINEMODE or
                option == ttype.TTYPE or
                option == MCCP or
                option == mssp.MSSP)

    def enableLocal(self, option):
        """
        Call to allow the activation of options for this protocol
        """
        return option == MCCP

    def disableLocal(self, option):
        """
        Disable a given option
        """
        if option == MCCP:
            self.mccp.no_mccp(option)
            return True
        else:
            return super(TelnetProtocol, self).disableLocal(option)

    def connectionLost(self, reason):
        """
        This is executed when the connection is lost for
        whatever reason. It can also be called directly, from
        the disconnect method
        """
        self.sessionhandler.disconnect(self)
        self.transport.loseConnection()

    def dataReceived(self, data):
        """
        This method will split the incoming data depending on if it
        starts with IAC (a telnet command) or not. All other data will
        be handled in line mode. Some clients also sends an erroneous
        line break after IAC, which we must watch out for.
        """
        # Precedence note: this reads as
        # (data and data[0] == IAC) or self.iaw_mode.
        if data and data[0] == IAC or self.iaw_mode:
            try:
                #print "IAC mode"
                super(TelnetProtocol, self).dataReceived(data)
                # A lone IAC byte means the command continues in the next
                # read; remember that so the continuation is routed here too.
                if len(data) == 1:
                    self.iaw_mode = True
                else:
                    self.iaw_mode = False
                return
            except Exception, err1:
                # Log the offending bytes both raw and as ordinals for
                # debugging broken client negotiation.
                conv = ""
                try:
                    for b in data:
                        conv += " " + repr(ord(b))
                except Exception, err2:
                    conv = str(err2) + ":", str(data)
                out = "Telnet Error (%s): %s (%s)" % (err1, data, conv)
                logger.log_trace(out)
                return
        # if we get to this point the command must end with a linebreak.
        # We make sure to add it, to fix some clients messing this up.
        data = data.rstrip("\r\n") + "\n"
        #print "line data in:", repr(data)
        StatefulTelnetProtocol.dataReceived(self, data)

    def _write(self, data):
        "hook overloading the one used in plain telnet"
        # print "_write (%s): %s" % (self.state, " ".join(str(ord(c)) for c in data))
        # Normalize newlines to telnet CRLF without doubling an existing CR.
        data = data.replace('\n', '\r\n').replace('\r\r\n', '\r\n')
        #data = data.replace('\n', '\r\n')
        super(TelnetProtocol, self)._write(mccp_compress(self, data))

    def sendLine(self, line):
        "hook overloading the one used by linereceiver"
        #print "sendLine (%s):\n%s" % (self.state, line)
        #escape IAC in line mode, and correctly add \r\n
        line += self.delimiter
        line = line.replace(IAC, IAC + IAC).replace('\n', '\r\n')
        return self.transport.write(mccp_compress(self, line))

    def lineReceived(self, string):
        """
        Telnet method called when data is coming in over the telnet
        connection. We pass it on to the game engine directly.
        """
        self.data_in(text=string)

    # Session hooks

    def disconnect(self, reason=None):
        """
        generic hook for the engine to call in order to
        disconnect this protocol.
        """
        if reason:
            self.data_out(reason)
        self.connectionLost(reason)

    def data_in(self, text=None, **kwargs):
        """
        Data Telnet -> Server
        """
        self.sessionhandler.data_in(self, text=text, **kwargs)

    def data_out(self, text=None, **kwargs):
        """
        Data Evennia -> Player.
        generic hook method for engine to call in order to send data
        through the telnet connection.

        valid telnet kwargs:
            raw=True - pass string through without any ansi
                       processing (i.e. include Evennia ansi markers but do
                       not convert them into ansi tokens)
            nomarkup=True - strip all ansi markup

        The telnet ttype negotiation flags, if any, are used if no kwargs
        are given.
        """
        try:
            text = utils.to_str(text if text else "", encoding=self.encoding)
        except Exception, e:
            self.sendLine(str(e))
            return
        if "oob" in kwargs:
            # Out-of-band data: forwarded via MSDP when the client supports it.
            oobstruct = self.sessionhandler.oobstruct_parser(kwargs.pop("oob"))
            if "MSDP" in self.protocol_flags:
                for cmdname, args, kwargs in oobstruct:
                    #print "cmdname, args, kwargs:", cmdname, args, kwargs
                    msdp_string = self.msdp.evennia_to_msdp(cmdname, *args, **kwargs)
                    #print "msdp_string:", msdp_string
                    self.msdp.data_out(msdp_string)

        # Decide how to treat colour markup from ttype flags and kwargs.
        ttype = self.protocol_flags.get('TTYPE', {})
        raw = kwargs.get("raw", False)
        nomarkup = not (ttype or ttype.get('256 COLORS') or ttype.get('ANSI') or not ttype.get("init_done"))
        nomarkup = kwargs.get("nomarkup", nomarkup)
        if raw:
            # no processing whatsoever
            self.sendLine(text)
        else:
            # we need to make sure to kill the color at the end in order
            # to match the webclient output.
            # print "telnet data out:", self.protocol_flags, id(self.protocol_flags), id(self)
            self.sendLine(ansi.parse_ansi(_RE_N.sub("", text) + "{n", strip_ansi=nomarkup, xterm256=ttype.get('256 COLORS')))
| [
"Khandrish@localhost"
] | Khandrish@localhost |
0cd635e0c45b489dbb6733fdf50f79553aadc6a0 | 2466271ecbd49dae14182827af759e47e1b44f34 | /preprocess.py | 0b59d3a0f9af5cfbc12a8953626ccec99ed0b937 | [
"MIT"
] | permissive | lambda-stockly/build-stockly-DS | 226d6f787754ef28c9f5b63aeb310fb84eb28cdf | e5dd50b574298ca5eaff54c5e25ec15dd666d63c | refs/heads/master | 2020-05-24T13:26:32.912862 | 2019-05-26T01:10:37 | 2019-05-26T01:10:37 | 187,289,712 | 1 | 3 | MIT | 2019-11-01T21:30:38 | 2019-05-17T22:43:15 | Python | UTF-8 | Python | false | false | 21,619 | py | import fbprophet
import pandas as pd
import numpy as np
from alpha_vantage.timeseries import TimeSeries
import os
class Magic():
    '''
    Download daily stock data from Alpha Vantage and forecast it with
    Facebook Prophet.

    original script from :
    https://github.com/WillKoehrsen/Data-Analysis/blob/master/stocker/stocker.py
    credit goes to this script.
    '''
    #Initialize parameters
    def __init__(self, ticker):
        """Fetch full daily history for *ticker* and set Prophet defaults."""
        # might need to initialize with a local variable that stores the actual key
        # otherwise os.getenv() will look for the environment variable
        # and if the name of the environment variable is not the same
        # then this will not work!!!
        # ALPHAVANTAGE_API_KEY = 'SXG08DL4S2EW8SKC'
        ALPHAVANTAGE_API_KEY = os.getenv('ALPHAVANTAGE_API_KEY')
        ts = TimeSeries(key=ALPHAVANTAGE_API_KEY, output_format='pandas')
        ticker = ticker.upper()
        self.symbol = ticker
        try:
            data, meta_data = ts.get_daily(self.symbol, outputsize='full')
        except Exception as e:
            # NOTE(review): on a failed download the constructor returns early,
            # leaving the instance without .stock etc.; any later method call
            # will then raise AttributeError.
            print('Error retrieving Stock Data...')
            print(e)
            return
        data = data.reset_index(level=0)
        data['date'] = pd.to_datetime(data['date'])
        data['ds'] = data['date']
        data = data.rename(columns={
            'date': 'Date', '1. open': 'Open', '2. high': 'High',
            '3. low': 'Low', '4. close': 'Close', '5. volume': 'Volume'
        })
        if ('Adj. Close' not in data.columns):
            data['Adj. Close'] = data['Close']
            data['Adj. Open'] = data['Open']
        # Prophet expects the columns 'ds' (date) and 'y' (value to forecast).
        data['y'] = data['Adj. Close']
        data['Daily Change'] = data['Adj. Close'] - data['Adj. Open']
        self.stock = data.copy()
        # Summary statistics over the downloaded history.
        self.min_date = min(data['Date'])
        self.max_date = max(data['Date'])
        self.max_price = np.max(self.stock['y'])
        self.min_price = np.min(self.stock['y'])
        self.min_price_date = self.stock[self.stock['y'] == self.min_price]['Date']
        self.min_price_date = self.min_price_date[self.min_price_date.index[0]]
        self.max_price_date = self.stock[self.stock['y'] == self.max_price]['Date']
        self.max_price_date = self.max_price_date[self.max_price_date.index[0]]
        self.starting_price = float(self.stock.loc[0, 'Adj. Open'])
        self.most_recent_price = float(self.stock.loc[self.stock.index[-1], 'y'])
        # Behaviour flags and Prophet hyperparameters (tweakable after
        # construction, before calling the modelling methods).
        self.round_dates = True
        self.training_years = 3
        self.changepoint_prior_scale = 0.05
        self.weekly_seasonality = False
        self.daily_seasonality = False
        self.monthly_seasonality = True
        self.yearly_seasonality = True
        self.changepoints = None
        print('{} Preprocessing Initialized. Data covers {} to {}.'.format(self.symbol,
                                                                     self.min_date,
                                                                     self.max_date))
    """
    Make sure start and end dates are in the range and can be
    converted to pandas datetimes. Returns dates in the correct format
    """
    def handle_dates(self, start_date, end_date):
        # Default start and end date are the beginning and end of data
        if start_date is None:
            start_date = self.min_date
        if end_date is None:
            end_date = self.max_date
        try:
            # Convert to pandas datetime for indexing dataframe
            start_date = pd.to_datetime(start_date)
            end_date = pd.to_datetime(end_date)
        except Exception as e:
            print('Enter valid pandas date format.')
            print(e)
            return
        valid_start = False
        valid_end = False
        # User will continue to enter dates until valid dates are met
        # NOTE(review): this loop blocks on interactive input() -- unsuitable
        # for a server context.  `&` on bools behaves like `and` here.
        while (not valid_start) & (not valid_end):
            valid_end = True
            valid_start = True
            if end_date < start_date:
                print('End Date must be later than start date.')
                start_date = pd.to_datetime(input('Enter a new start date: '))
                end_date= pd.to_datetime(input('Enter a new end date: '))
                valid_end = False
                valid_start = False
            else:
                if end_date > self.max_date:
                    print('End Date exceeds data range')
                    end_date= pd.to_datetime(input('Enter a new end date: '))
                    valid_end = False
                if start_date < self.min_date:
                    print('Start Date is before date range')
                    start_date = pd.to_datetime(input('Enter a new start date: '))
                    valid_start = False
        return start_date, end_date
    def make_a_df(self,start_date=None, end_date=None,df=None):
        '''
        Added by Chris Louie for stockly

        Return the stock frame trimmed to [start_date, end_date] with
        binary 'Up Days' / 'Down Days' columns derived from 'Daily Change'.
        '''
        # Default is to use the object stock data
        if start_date is None:
            start_date = self.min_date
        if end_date is None:
            end_date = self.max_date
        # NOTE(review): `if not df:` is only safe because the default is None;
        # passing an actual DataFrame would raise ("truth value of a DataFrame
        # is ambiguous").  An `is None` check was probably intended.
        if not df:
            df = self.stock.copy()
        start_date, end_date = self.handle_dates(start_date, end_date)
        # keep track of whether the start and end dates are in the data
        start_in = True
        end_in = True
        # If user wants to round dates (default behavior)
        if self.round_dates:
            # Record if start and end date are in df
            if (start_date not in list(df['Date'])):
                start_in = False
            if (end_date not in list(df['Date'])):
                end_in = False
            # If both are not in dataframe, round both
            if (not end_in) & (not start_in):
                trim_df = df[(df['Date'] >= start_date) &
                             (df['Date'] <= end_date)]
            else:
                # If both are in dataframe, round neither
                if (end_in) & (start_in):
                    trim_df = df[(df['Date'] >= start_date) &
                                 (df['Date'] <= end_date)]
                else:
                    # If only start is missing, round start
                    if (not start_in):
                        trim_df = df[(df['Date'] > start_date) &
                                     (df['Date'] <= end_date)]
                    # If only end is missing round end
                    elif (not end_in):
                        trim_df = df[(df['Date'] >= start_date) &
                                     (df['Date'] < end_date)]
        else:
            valid_start = False
            valid_end = False
            while (not valid_start) & (not valid_end):
                start_date, end_date = self.handle_dates(start_date, end_date)
                # No round dates, if either data not in, print message and return
                if (start_date in list(df['Date'])):
                    valid_start = True
                if (end_date in list(df['Date'])):
                    valid_end = True
                # Check to make sure dates are in the data
                # NOTE(review): builtin input() takes no 'prompt' keyword --
                # these two calls raise TypeError if this branch is reached.
                if (start_date not in list(df['Date'])):
                    print('Start Date not in data (either out of range or not a trading day.)')
                    start_date = pd.to_datetime(input(prompt='Enter a new start date: '))
                elif (end_date not in list(df['Date'])):
                    print('End Date not in data (either out of range or not a trading day.)')
                    end_date = pd.to_datetime(input(prompt='Enter a new end date: ') )
            # Dates are not rounded
            # NOTE(review): 'end_date.date' is the bound method, not a value;
            # this comparison looks like a latent bug -- 'end_date' (or
            # 'end_date.date()') was probably intended.
            trim_df = df[(df['Date'] >= start_date) &
                         (df['Date'] <= end_date.date)]
        up_days = []
        down_days = []
        # NOTE(review): label-based lookup with a positional counter; this
        # assumes trim_df keeps an index starting at 0 (i.e. start_date equals
        # the first trading day), otherwise a KeyError is raised -- confirm.
        for i in range(0,len(trim_df)):
            if trim_df['Daily Change'][i] > 0:
                up_days.append(1)
                down_days.append(0)
            elif trim_df['Daily Change'][i] < 0:
                down_days.append(1)
                up_days.append(0)
            else:
                down_days.append(0)
                up_days.append(0)
        print(len(up_days))
        print(len(down_days))
        trim_df['Up Days'] = up_days
        trim_df['Down Days'] = down_days
        return trim_df
    def resample(self, dataframe):
        """Resample to a daily frequency and interpolate missing values."""
        # Change the index and resample at daily level
        dataframe = dataframe.set_index('ds')
        dataframe = dataframe.resample('D')
        # Reset the index and interpolate nan values
        dataframe = dataframe.reset_index(level=0)
        dataframe = dataframe.interpolate()
        return dataframe
    def remove_weekends(self, dataframe):
        """Drop Saturday/Sunday rows (markets are closed) from *dataframe*."""
        # Reset index to use ix
        dataframe = dataframe.reset_index(drop=True)
        weekends = []
        # Find all of the weekends
        for i, date in enumerate(dataframe['ds']):
            if (date.weekday()) == 5 | (date.weekday() == 6):
                weekends.append(i)
        # Drop the weekends
        dataframe = dataframe.drop(weekends, axis=0)
        return dataframe
    def create_model(self):
        """Build an (unfitted) Prophet model from the instance hyperparameters."""
        # Make the model
        model = fbprophet.Prophet(daily_seasonality=self.daily_seasonality,
                                  weekly_seasonality=self.weekly_seasonality,
                                  yearly_seasonality=self.yearly_seasonality,
                                  changepoint_prior_scale=self.changepoint_prior_scale,
                                  changepoints=self.changepoints)
        if self.monthly_seasonality:
            # Add monthly seasonality
            model.add_seasonality(name = 'monthly', period = 30.5, fourier_order = 5)
        return model
    def create_prophet_model(self, days=0, resample=False):
        """Fit a model on the last training_years and predict *days* ahead.

        Returns the fitted model and the full prediction frame.
        """
        model = self.create_model()
        # Fit on the stock history for self.training_years number of years
        stock_history = self.stock[self.stock['Date'] > (self.max_date - pd.DateOffset(years = self.training_years))]
        if resample:
            stock_history = self.resample(stock_history)
        model.fit(stock_history)
        # Make and predict for next year with future dataframe
        future = model.make_future_dataframe(periods = days, freq='D')
        future = model.predict(future)
        if days > 0:
            # Print the predicted price
            print('Predicted Price on {} = ${:.2f}'.format(
                future.loc[future.index[-1], 'ds'], future.loc[future.index[-1], 'yhat']))
        return model, future
    def evaluate_prediction(self, start_date=None, end_date=None, nshares = None):
        """Back-test the model on [start_date, end_date].

        Trains on the preceding training_years, prints accuracy statistics and
        (optionally) simulates trading *nshares*.  Returns the test frame with
        predictions merged in.
        """
        # Default start date is one year before end of data
        # Default end date is end date of data
        if start_date is None:
            start_date = self.max_date - pd.DateOffset(years=1)
        if end_date is None:
            end_date = self.max_date
        start_date, end_date = self.handle_dates(start_date, end_date)
        # Training data starts self.training_years years before start date and goes up to start date
        train = self.stock[(self.stock['Date'] < start_date) &
                           (self.stock['Date'] > (start_date - pd.DateOffset(years=self.training_years)))]
        # Testing data is specified in the range
        test = self.stock[(self.stock['Date'] >= start_date) & (self.stock['Date'] <= end_date)]
        # Create and train the model
        model = self.create_model()
        model.fit(train)
        # Make a future dataframe and predictions
        future = model.make_future_dataframe(periods = 365, freq='D')
        future = model.predict(future)
        # Merge predictions with the known values
        test = pd.merge(test, future, on = 'ds', how = 'inner')
        train = pd.merge(train, future, on = 'ds', how = 'inner')
        # Calculate the differences between consecutive measurements
        test['pred_diff'] = test['yhat'].diff()
        test['real_diff'] = test['y'].diff()
        # Correct is when we predicted the correct direction
        test['correct'] = (np.sign(test['pred_diff'][1:]) == np.sign(test['real_diff'][1:])) * 1
        # Accuracy when we predict increase and decrease
        increase_accuracy = 100 * np.mean(test[test['pred_diff'] > 0]['correct'])
        decrease_accuracy = 100 * np.mean(test[test['pred_diff'] < 0]['correct'])
        # Calculate mean absolute error
        test_errors = abs(test['y'] - test['yhat'])
        test_mean_error = np.mean(test_errors)
        train_errors = abs(train['y'] - train['yhat'])
        train_mean_error = np.mean(train_errors)
        # Calculate percentage of time actual value within prediction range
        test['in_range'] = False
        for i in test.index:
            if (test.loc[i, 'y'] < test.loc[i, 'yhat_upper']) & (test.loc[i, 'y'] > test.loc[i, 'yhat_lower']):
                test.loc[i, 'in_range'] = True
        in_range_accuracy = 100 * np.mean(test['in_range'])
        if not nshares:
            # Date range of predictions
            print('\nPrediction Range: {} to {}.'.format(start_date,
                end_date))
            # Final prediction vs actual value
            print('\nPredicted price on {} = ${:.2f}.'.format(max(future['ds']), future.loc[future.index[-1], 'yhat']))
            print('Actual price on    {} = ${:.2f}.\n'.format(max(test['ds']), test.loc[test.index[-1], 'y']))
            print('Average Absolute Error on Training Data = ${:.2f}.'.format(train_mean_error))
            print('Average Absolute Error on Testing  Data = ${:.2f}.\n'.format(test_mean_error))
            # Direction accuracy
            print('When the model predicted an increase, the price increased {:.2f}% of the time.'.format(increase_accuracy))
            print('When the model predicted a  decrease, the price decreased  {:.2f}% of the time.\n'.format(decrease_accuracy))
            print('The actual value was within the {:d}% confidence interval {:.2f}% of the time.'.format(int(100 * model.interval_width), in_range_accuracy))
        # If a number of shares is specified, play the game
        elif nshares:
            # Only playing the stocks when we predict the stock will increase
            test_pred_increase = test[test['pred_diff'] > 0]
            test_pred_increase.reset_index(inplace=True)
            prediction_profit = []
            # Iterate through all the predictions and calculate profit from playing
            for i, correct in enumerate(test_pred_increase['correct']):
                # If we predicted up and the price goes up, we gain the difference
                if correct == 1:
                    prediction_profit.append(nshares * test_pred_increase.loc[i, 'real_diff'])
                # If we predicted up and the price goes down, we lose the difference
                else:
                    prediction_profit.append(nshares * test_pred_increase.loc[i, 'real_diff'])
            test_pred_increase['pred_profit'] = prediction_profit
            # Put the profit into the test dataframe
            test = pd.merge(test, test_pred_increase[['ds', 'pred_profit']], on = 'ds', how = 'left')
            test.loc[0, 'pred_profit'] = 0
            # Profit for either method at all dates
            test['pred_profit'] = test['pred_profit'].cumsum().ffill()
            test['hold_profit'] = nshares * (test['y'] - float(test.loc[0, 'y']))
            # Display information
            print('You played the stock market in {} from {} to {} with {} shares.\n'.format(
                self.symbol, start_date, end_date, nshares))
            print('When the model predicted an increase, the price increased {:.2f}% of the time.'.format(increase_accuracy))
            print('When the model predicted a  decrease, the price decreased  {:.2f}% of the time.\n'.format(decrease_accuracy))
            # Display some friendly information about the perils of playing the stock market
            print('The total profit using the Prophet model = ${:.2f}.'.format(np.sum(prediction_profit)))
            print('The Buy and Hold strategy profit =         ${:.2f}.'.format(float(test.loc[test.index[-1], 'hold_profit'])))
            print('\nThanks for playing the stock market!\n')
            # Plot the predicted and actual profits over time
            # Final profit and final smart used for locating text
            # NOTE(review): the three values below are leftovers from the
            # original plotting code; they are computed but never used.
            final_profit = test.loc[test.index[-1], 'pred_profit']
            final_smart = test.loc[test.index[-1], 'hold_profit']
            # text location
            last_date = test.loc[test.index[-1], 'ds']
            text_location = (last_date - pd.DateOffset(months = 1))
        return test
    def make_a_future_dataframe(self,periods=30,freq='D'):
        '''
        Added by Chris Louie for stockly

        Forecast *periods* steps ahead and return the weekday-only prediction
        frame with estimate/upper/lower/change/direction and Up/Down Days.
        '''
        train = self.stock[self.stock['Date'] > (max(self.stock['Date']) - pd.DateOffset(years=self.training_years))]
        model = self.create_model()
        model.fit(train)
        future = model.make_future_dataframe(periods=periods,freq=freq)
        future = model.predict(future)
        # Keep only dates beyond the observed history.
        preds = future[future['ds'] >= max(self.stock['Date'])]
        preds = self.remove_weekends(preds)
        preds['diff'] = preds['yhat'].diff()
        preds = preds.dropna()
        preds['direction'] = (preds['diff'] > 0) * 1
        preds = preds.rename(columns={
            'ds': 'Date', 'yhat': 'estimate', 'diff': 'change',
            'yhat_upper': 'upper', 'yhat_lower': 'lower'
        })
        preds = preds.reset_index()
        up_days = []
        down_days = []
        # NOTE(review): this classifies by 'estimate' (the predicted price,
        # which is normally always > 0), so every day counts as an up day;
        # 'change' was probably intended -- compare predict_future(), which
        # uses the diff.  Confirm before relying on Up/Down Days here.
        for i in range(len(preds)):
            if preds['estimate'][i] > 0:
                up_days.append(1)
                down_days.append(0)
            elif preds['estimate'][i] < 0:
                down_days.append(1)
                up_days.append(0)
            else:
                down_days.append(0)
                up_days.append(0)
        print(len(up_days))
        print(len(down_days))
        preds['Up Days'] = up_days
        preds['Down Days'] = down_days
        return preds
    # Predict the future price for a given range of days
    def predict_future(self, days=30):
        """Forecast *days* ahead, print predicted increases/decreases and
        return the weekday-only prediction frame."""
        # Use past self.training_years years for training
        train = self.stock[self.stock['Date'] > (max(self.stock['Date']) - pd.DateOffset(years=self.training_years))]
        model = self.create_model()
        model.fit(train)
        # Future dataframe with specified number of days to predict
        future = model.make_future_dataframe(periods=days, freq='D')
        future = model.predict(future)
        # Only concerned with future dates
        future = future[future['ds'] >= max(self.stock['Date'])]
        # Remove the weekends
        future = self.remove_weekends(future)
        # Calculate whether increase or not
        future['diff'] = future['yhat'].diff()
        future = future.dropna()
        # Find the prediction direction and create separate dataframes
        future['direction'] = (future['diff'] > 0) * 1
        # Rename the columns for presentation
        future = future.rename(columns={'ds': 'Date', 'yhat': 'estimate', 'diff': 'change',
                                        'yhat_upper': 'upper', 'yhat_lower': 'lower'})
        future_increase = future[future['direction'] == 1]
        future_decrease = future[future['direction'] == 0]
        # Print out the dates
        print('\nPredicted Increase: \n')
        print(future_increase[['Date', 'estimate', 'change', 'upper', 'lower']])
        print('\nPredicted Decrease: \n')
        print(future_decrease[['Date', 'estimate', 'change', 'upper', 'lower']])
        return future
    def output_historical(self):
        '''
        This method is for storing an output for the predict_future method.
        Create softmax probability for whether player should buy hold or sell
        '''
        def softmax(x):
            """
            Compute softmax values for each sets of scores in x.
            """
            e_x = np.exp(x - np.max(x))
            return e_x / e_x.sum(axis=0)
        output = self.make_a_df()
        average_delta = np.mean(output['Daily Change'])
        buy = sum(output['Up Days'] == 1)
        sell = sum(output['Down Days'] == 1)
        # Heuristic "hold" score: magnitude of the average move when it is
        # large, otherwise an average of the three signals.
        if average_delta > 1:
            hold = average_delta
        elif average_delta < -1:
            hold = -average_delta
        else:
            hold = (buy+sell+average_delta)/3
        scores = [sell,hold,buy]
        values = softmax(scores)
        keys = ['Sell','Hold','Buy']
        historical_analysis = dict(zip(keys,values))
        return historical_analysis
    def output_future(self):
        '''
        This method is for storing an output for the predict_future method.
        Create softmax probability for whether player should buy hold or sell
        '''
        def softmax(x):
            """Compute softmax values for each sets of scores in x."""
            e_x = np.exp(x - np.max(x))
            return e_x / e_x.sum(axis=0)
        future_model = self.predict_future()
        average_delta = np.mean(future_model['change'])
        buy = sum(future_model['direction'] == 1)
        sell = sum(future_model['direction'] == 0)
        # Same "hold" heuristic as output_historical(), on forecasted data.
        if average_delta > 1:
            hold = average_delta
        elif average_delta < -1:
            hold = -average_delta
        else:
            hold = (buy+sell+average_delta)/3
        scores = [sell,hold,buy]
        values = softmax(scores)
        keys = ['Sell','Hold','Buy']
        future_analysis = dict(zip(keys,values))
        return future_analysis
| [
"chrisl6uie@gmail.com"
] | chrisl6uie@gmail.com |
6319596efc7129f373479affcbde45af66ccb8e2 | 556da038494ad93b03923577b48f89dd6d70fb48 | /1031 Hello World for U.py | ffcb9e72ef2b82797d281416b6e65dd22a397e27 | [] | no_license | junyechen/PAT-Advanced-Level-Practice | f5c9f604c458965c2165960aaac714f69ce1057b | 401c9d3040a0273c0e2461c963b781bcebd33667 | refs/heads/master | 2020-06-19T10:55:19.564725 | 2020-05-12T10:21:14 | 2020-05-12T10:21:14 | 196,684,047 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,634 | py | """
Given any string of N (≥5) characters, you are asked to form the characters into the shape of U. For example, helloworld can be printed as:
h d
e l
l r
lowo
That is, the characters must be printed in the original order, starting top-down from the left vertical line with n
1
characters, then left to right along the bottom line with n
2
characters, and finally bottom-up along the vertical line with n
3
characters. And more, we would like U to be as squared as possible -- that is, it must be satisfied that n
1
=n
3
=max { k | k≤n
2
for all 3≤n
2
≤N } with n
1
+n
2
+n
3
−2=N.
Input Specification:
Each input file contains one test case. Each case contains one string with no less than 5 and no more than 80 characters in a line. The string contains no white space.
Output Specification:
For each test case, print the input string in the shape of U as specified in the description.
Sample Input:
helloworld!
Sample Output:
h !
e d
l l
lowor
"""
#########################################################
"""
本题非常简单,一次通过
(This problem is straightforward; the solution passed on the first attempt.)
"""
#########################################################
# Print the input string bent into a "U": n1 characters down the left arm,
# n2 characters across the bottom, and n1 characters up the right arm, where
# n1 = n3 and n1 + n2 - 2 + n1 == len(text).
text = input()
side = (len(text) + 2) // 3          # height of each vertical arm (n1 = n3)
width = len(text) + 2 - 2 * side     # length of the bottom row (n2)
grid = [[' '] * width for _ in range(side)]
for row in range(side):
    grid[row][0] = text[row]                                        # left arm, top-down
    grid[side - 1 - row][width - 1] = text[side + width - 2 + row]  # right arm, bottom-up
for col in range(width):
    grid[side - 1][col] = text[side - 1 + col]                      # bottom row, left-to-right
for row in grid:
    print(''.join(row))
| [
"junyechen@zju.edu.cn"
] | junyechen@zju.edu.cn |
76fb42b299c2918528250c19cf7d0c3d83e67a9c | ef6f96d2331319f2a6b5a680dca10b24cca5ac09 | /Sorting/Quicksort 1 - Partition.py | df1281bf8bd31e055e409075ec7fa201e989ea57 | [] | no_license | ralba1997/Homework2 | f04e856e1625fcb1767520320f9b7a18ebe27468 | 9d122074b018bfab2b636ce8233884a63491b01d | refs/heads/master | 2021-08-22T07:33:42.135006 | 2017-11-29T16:40:53 | 2017-11-29T16:40:53 | 112,383,512 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 220 | py | n = int(input())
# HackerRank "Quicksort 1 - Partition": stable partition around the first
# element.  Values smaller than the pivot keep their order on the left,
# larger ones on the right, and the pivot is printed between them.
values = list(map(int, input().split()))
pivot = values[0]
smaller = [v for v in values if v < pivot]
larger = [v for v in values if v > pivot]
print(*smaller, pivot, *larger)
| [
"noreply@github.com"
] | ralba1997.noreply@github.com |
59dbf74544c020ce1e302f50a5b7296773fb3383 | 7c1f9b4fb8b6ee1d0f0d240e903a49bad41dd59f | /src/dcgan_torch.py | e96f9a6b566c26a7e980b46dae9f23ded0602308 | [
"MIT"
] | permissive | alperyeg/spiking_GANs | eea1dcbb7063c6e5037c887d95fe5992b6eb7e12 | 9249093db043c0b41eb1e935dd09f5d415307ca5 | refs/heads/master | 2021-03-30T17:50:20.514454 | 2018-08-06T18:23:25 | 2018-08-06T18:23:25 | 91,696,918 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,774 | py | from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from torch.autograd import Variable
parser = argparse.ArgumentParser()
parser.add_argument('--dataset', required=True,
help='cifar10 | lsun | imagenet | folder | lfw | fake')
parser.add_argument('--dataroot', required=True, help='path to dataset')
parser.add_argument('--workers', type=int,
help='number of data loading workers', default=2)
parser.add_argument('--batchSize', type=int, default=64,
help='input batch size')
parser.add_argument('--imageSize', type=int, default=64,
help='the height / width of the input image to network')
parser.add_argument('--nz', type=int, default=100,
help='size of the latent z vector')
parser.add_argument('--ngf', type=int, default=64)
parser.add_argument('--ndf', type=int, default=64)
parser.add_argument('--niter', type=int, default=25,
help='number of epochs to train for')
parser.add_argument('--lr', type=float, default=0.0002,
help='learning rate, default=0.0002')
parser.add_argument('--beta1', type=float, default=0.5,
help='beta1 for adam. default=0.5')
parser.add_argument('--cuda', action='store_true', help='enables cuda')
parser.add_argument('--ngpu', type=int, default=1,
help='number of GPUs to use')
parser.add_argument('--netG', default='',
help="path to netG (to continue training)")
parser.add_argument('--netD', default='',
help="path to netD (to continue training)")
parser.add_argument('--outf', default='.',
help='folder to output images and model checkpoints')
parser.add_argument('--manualSeed', type=int, help='manual seed')
opt = parser.parse_args()
print(opt)
try:
os.makedirs(opt.outf)
except OSError:
pass
if opt.manualSeed is None:
opt.manualSeed = random.randint(1, 10000)
print("Random Seed: ", opt.manualSeed)
random.seed(opt.manualSeed)
torch.manual_seed(opt.manualSeed)
if opt.cuda:
torch.cuda.manual_seed_all(opt.manualSeed)
cudnn.benchmark = True
if torch.cuda.is_available() and not opt.cuda:
print(
"WARNING: You have a CUDA device, so you should probably run with --cuda")
if opt.dataset in ['imagenet', 'folder', 'lfw']:
# folder dataset
dataset = dset.ImageFolder(root=opt.dataroot,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'lsun':
dataset = dset.LSUN(db_path=opt.dataroot, classes=['bedroom_train'],
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.CenterCrop(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'cifar10':
dataset = dset.CIFAR10(root=opt.dataroot, download=True,
transform=transforms.Compose([
transforms.Scale(opt.imageSize),
transforms.ToTensor(),
transforms.Normalize((0.5, 0.5, 0.5),
(0.5, 0.5, 0.5)),
]))
elif opt.dataset == 'fake':
dataset = dset.FakeData(image_size=(3, opt.imageSize, opt.imageSize),
transform=transforms.ToTensor())
assert dataset
dataloader = torch.utils.data.DataLoader(dataset, batch_size=opt.batchSize,
shuffle=True,
num_workers=int(opt.workers))
ngpu = int(opt.ngpu)
nz = int(opt.nz)
ngf = int(opt.ngf)
ndf = int(opt.ndf)
nc = 3
# custom weights initialization called on netG and netD
def weights_init(m):
classname = m.__class__.__name__
if classname.find('Conv') != -1:
m.weight.data.normal_(0.0, 0.02)
elif classname.find('BatchNorm') != -1:
m.weight.data.normal_(1.0, 0.02)
m.bias.data.fill_(0)
class _netG(nn.Module):
def __init__(self, ngpu):
super(_netG, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is Z, going into a convolution
nn.ConvTranspose2d(nz, ngf * 8, 4, 1, 0, bias=False),
nn.BatchNorm2d(ngf * 8),
nn.ReLU(True),
# state size. (ngf*8) x 4 x 4
nn.ConvTranspose2d(ngf * 8, ngf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 4),
nn.ReLU(True),
# state size. (ngf*4) x 8 x 8
nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf * 2),
nn.ReLU(True),
# state size. (ngf*2) x 16 x 16
nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False),
nn.BatchNorm2d(ngf),
nn.ReLU(True),
# state size. (ngf) x 32 x 32
nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False),
nn.Tanh()
# state size. (nc) x 64 x 64
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input,
range(self.ngpu))
else:
output = self.main(input)
return output
netG = _netG(ngpu)
netG.apply(weights_init)
if opt.netG != '':
netG.load_state_dict(torch.load(opt.netG))
print(netG)
class _netD(nn.Module):
def __init__(self, ngpu):
super(_netD, self).__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid()
)
def forward(self, input):
if isinstance(input.data, torch.cuda.FloatTensor) and self.ngpu > 1:
output = nn.parallel.data_parallel(self.main, input,
range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1).squeeze(1)
netD = _netD(ngpu)
netD.apply(weights_init)
if opt.netD != '':
netD.load_state_dict(torch.load(opt.netD))
print(netD)
criterion = nn.BCELoss()
input = torch.FloatTensor(opt.batchSize, 3, opt.imageSize, opt.imageSize)
noise = torch.FloatTensor(opt.batchSize, nz, 1, 1)
fixed_noise = torch.FloatTensor(opt.batchSize, nz, 1, 1).normal_(0, 1)
label = torch.FloatTensor(opt.batchSize)
real_label = 1
fake_label = 0
if opt.cuda:
netD.cuda()
netG.cuda()
criterion.cuda()
input, label = input.cuda(), label.cuda()
noise, fixed_noise = noise.cuda(), fixed_noise.cuda()
fixed_noise = Variable(fixed_noise)
# setup optimizer
optimizerD = optim.Adam(netD.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
optimizerG = optim.Adam(netG.parameters(), lr=opt.lr, betas=(opt.beta1, 0.999))
for epoch in range(opt.niter):
for i, data in enumerate(dataloader, 0):
############################
# (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
###########################
# train with real
netD.zero_grad()
real_cpu, _ = data
batch_size = real_cpu.size(0)
if opt.cuda:
real_cpu = real_cpu.cuda()
input.resize_as_(real_cpu).copy_(real_cpu)
label.resize_(batch_size).fill_(real_label)
inputv = Variable(input)
labelv = Variable(label)
output = netD(inputv)
errD_real = criterion(output, labelv)
errD_real.backward()
D_x = output.data.mean()
# train with fake
noise.resize_(batch_size, nz, 1, 1).normal_(0, 1)
noisev = Variable(noise)
fake = netG(noisev)
labelv = Variable(label.fill_(fake_label))
output = netD(fake.detach())
errD_fake = criterion(output, labelv)
errD_fake.backward()
D_G_z1 = output.data.mean()
errD = errD_real + errD_fake
optimizerD.step()
############################
# (2) Update G network: maximize log(D(G(z)))
###########################
netG.zero_grad()
labelv = Variable(
label.fill_(real_label)) # fake labels are real for generator cost
output = netD(fake)
errG = criterion(output, labelv)
errG.backward()
D_G_z2 = output.data.mean()
optimizerG.step()
print(
'[%d/%d][%d/%d] Loss_D: %.4f Loss_G: %.4f D(x): %.4f D(G(z)): %.4f / %.4f'
% (epoch, opt.niter, i, len(dataloader),
errD.data[0], errG.data[0], D_x, D_G_z1, D_G_z2))
if i % 100 == 0:
vutils.save_image(real_cpu,
'%s/real_samples.png' % opt.outf,
normalize=True)
fake = netG(fixed_noise)
vutils.save_image(fake.data,
'%s/fake_samples_epoch_%03d.png' % (
opt.outf, epoch),
normalize=True)
# do checkpointing
torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epoch))
torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epoch))
| [
"a.yegenoglu@fz-juelich.de"
] | a.yegenoglu@fz-juelich.de |
b913f415c483e980c7ccb6d33a689d30036be6b2 | 517d0f427fc3977c854cb5ff5ede6e94ca990c98 | /code/merge-csv-to-geojson.py | c16c04ec9a4d73ae6f95d2afe48596b3ef23084c | [] | no_license | ceumicrodata/valasztas-terkep | 8259e992b3b48207bad6571aeecbacacf2302893 | 0dc63326a93151c86282d6fbc81c6792fffeb9a1 | refs/heads/master | 2016-09-05T13:54:39.815720 | 2014-04-15T20:36:04 | 2014-04-15T20:36:04 | 18,667,841 | 1 | 1 | null | null | null | null | UTF-8 | Python | false | false | 1,641 | py | import json
import unicodecsv
csv_file = unicodecsv.DictReader(open("../consistent/vote_counts_precincts.csv", "r"))
json_file = json.load(open("../consistent/szavazokorok.geojson"))
NUMERIC_FIELDS = ('szavazok','reszvetel','mcp','haza_nem_elado','sms','fkgp','udp','fidesz','sem','lmp','jesz','ump','munkaspart','szocdemek','kti','egyutt2014','zoldek','osszefogas','kormanyvaltok','jobbik','osszes_listas','egyeni_fidesz','egyeni_kormanyvaltok','egyeni_jobbik','egyeni_lmp')
class Searchable(object):
# A hash table for fast left joins.
def __init__(self, list, index_field):
dct = {}
for item in list:
dct[item[index_field]] = item
self.hash_table = dct
def search(self, index_value):
if index_value in self.hash_table:
return self.hash_table[index_value]
else:
return None
def numeric_fields(dct):
for key in dct.keys():
if key in NUMERIC_FIELDS:
if key=="reszvetel":
dct[key] = float(dct[key])
else:
dct[key] = int(dct[key])
return dct
def listas_nyertes(dct):
listas = dict(fidesz=dct['fidesz'],
kormanyvaltok=dct['kormanyvaltok'],
jobbik=dct['jobbik'],
lmp=dct['lmp'])
legtobb_szavazat = max(listas.values())
return listas.keys()[listas.values().index(legtobb_szavazat)]
if __name__ == '__main__':
output_list = []
geo_table = Searchable(list=json_file['features'], index_field='id')
for item in csv_file:
output = geo_table.search(item['id'])
item = numeric_fields(item)
item.update(dict(listas_nyertes=listas_nyertes(item)))
output['properties'].update(item)
output_list.append(output)
print json.dumps(dict(features=output_list, type='FeatureCollection'))
| [
"miklos.koren@gmail.com"
] | miklos.koren@gmail.com |
bb010b096427cce84eb368767cc9d17ddb8f16db | a9fc496e0724866093dbb9cba70a8fdce12b67a9 | /scripts/field/eunwol_house.py | 131e7ecadea4a9957479632d96bd39eede25e3ea | [
"MIT"
] | permissive | ryantpayton/Swordie | b2cd6b605f7f08f725f5e35d23ba3c22ef2ae7c0 | ca6f42dd43f63b1d2e6bb5cdc8fc051c277f326e | refs/heads/master | 2022-12-01T09:46:47.138072 | 2020-03-24T10:32:20 | 2020-03-24T10:32:20 | 253,997,319 | 2 | 0 | MIT | 2022-11-24T08:17:54 | 2020-04-08T05:50:22 | Java | UTF-8 | Python | false | false | 878 | py | # 410000001
if sm.hasQuest(38002):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("What happened? A house and a new name... But what happened to my friends? Are they alive? If I am, then maybe we failed to seal the Black Mage...")
sm.sendSay("No. They wouldn't give up that easily. They're probably hiding out somewhere, waiting to get back together. I need to look after myself for now, and get my strength back.")
sm.sendSay("Level 10... It's better than nothing, but it's not the best feeling. I'll hang around and get stronger. That's the only thing I can do now.")
sm.setQRValue(38002, "clear", False)
elif sm.hasQuest(38018):
sm.removeEscapeButton()
sm.flipDialoguePlayerAsSpeaker()
sm.sendNext("W-what is that thing? It looks so fuzzy. I don't think I should touch it...")
sm.setQRValue(38018, "clear", False) | [
"mechaviv@gmail.com"
] | mechaviv@gmail.com |
545eba6991ea678d66f9d8b217baed4ee4ca6693 | 8d759626b30f22a1baf5c551079d0fe33c10a223 | /blog/blogs/admin.py | eb842d6ff69f4bf002f0b33f5786e8017c262dad | [] | no_license | ialeev1234/blogs | 421eb0a92fbae8dc64c82f50025cbd1d22539e61 | c05b575932af5b91ce04240387a4d84373dcd3b4 | refs/heads/master | 2021-10-19T20:54:00.375570 | 2019-02-22T18:03:59 | 2019-02-23T18:24:14 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 140 | py | from django.contrib import admin
from blog.blogs.models import Blogs
@admin.register(Blogs)
class BlogsAdmin(admin.ModelAdmin):
pass
| [
"ja@contuur.com"
] | ja@contuur.com |
0597bc968aa2d2f070269bc331c51b5923e976a4 | 45826bdfebbd1d7638ab607906ac480031d6118b | /lib/extensions/parallel/data_parallel.py | b776fa31d5cca2b6fa562c89ff3627a5e42bcc00 | [
"MIT"
] | permissive | openseg-group/openseg.pytorch | b75cec5c95b6ff71707d8daf7806001bab89ecb3 | aefc75517b09068d7131a69420bc5f66cb41f0ee | refs/heads/master | 2023-09-06T10:19:57.749113 | 2022-08-07T09:10:20 | 2022-08-07T09:10:20 | 166,743,301 | 1,227 | 159 | MIT | 2021-07-14T06:10:44 | 2019-01-21T03:34:59 | Python | UTF-8 | Python | false | false | 9,281 | py | ##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
## Created by: Hang Zhang
## ECE Department, Rutgers University
## Email: zhang.hang@rutgers.edu
## Copyright (c) 2017
##
## This source code is licensed under the MIT-style license found in the
## LICENSE file in the root directory of this source tree
##+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
"""Encoding Data Parallel"""
import functools
import threading
import torch
import torch.cuda.comm as comm
from torch.autograd import Function
from torch.nn.parallel._functions import Broadcast
from torch.nn.parallel.data_parallel import DataParallel
from torch.nn.parallel.parallel_apply import get_a_var
from torch.nn.parallel.scatter_gather import gather
from lib.extensions.parallel.scatter_gather import scatter_kwargs
torch_ver = torch.__version__[:3]
class Reduce(Function):
@staticmethod
def forward(ctx, *inputs):
ctx.target_gpus = [inputs[i].get_device() for i in range(len(inputs))]
inputs = sorted(inputs, key=lambda i: i.get_device())
return comm.reduce_add(inputs)
@staticmethod
def backward(ctx, gradOutput):
return Broadcast.apply(ctx.target_gpus, gradOutput)
class DataParallelModel(DataParallel):
"""Implements data parallelism at the module level.
This container parallelizes the application of the given module by
splitting the input across the specified devices by chunking in the
batch dimension.
In the forward pass, the module is replicated on each device,
and each replica handles a portion of the input. During the backwards pass, gradients from each replica are summed into the original module.
Note that the outputs are not gathered, please use compatible
:class:`encoding.parallel.DataParallelCriterion`.
The batch size should be larger than the number of GPUs used. It should
also be an integer multiple of the number of GPUs so that each chunk is
the same size (so that each GPU processes the same number of samples).
Args:
module: module to be parallelized
device_ids: CUDA devices (default: all devices)
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
Amit Agrawal. "Context Encoding for Semantic Segmentation.
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = DataParallelModel(model, device_ids=[0, 1, 2])
>>> y = net(x)
"""
def __init__(self, module, device_ids=None, output_device=None, dim=0, gather_=True):
super(DataParallelModel, self).__init__(module, device_ids, output_device, dim)
self.gather_ = gather_
def gather(self, outputs, output_device):
if self.gather_:
return gather(outputs, output_device, dim=self.dim)
return outputs
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def replicate(self, module, device_ids):
modules = super(DataParallelModel, self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
class DataParallelCriterion(DataParallel):
"""
Calculate loss in multiple-GPUs, which balance the memory usage for
Semantic Segmentation.
The targets are splitted across the specified devices by chunking in
the batch dimension. Please use together with :class:`encoding.parallel.DataParallelModel`.
Reference:
Hang Zhang, Kristin Dana, Jianping Shi, Zhongyue Zhang, Xiaogang Wang, Ambrish Tyagi,
Amit Agrawal. “Context Encoding for Semantic Segmentation.
*The IEEE Conference on Computer Vision and Pattern Recognition (CVPR) 2018*
Example::
>>> net = DataParallelModel(model, device_ids=[0, 1, 2])
>>> criterion = DataParallelCriterion(criterion, device_ids=[0, 1, 2])
>>> y = net(x)
>>> loss = criterion(y, target)
"""
def __init__(self, module, device_ids=None, output_device=None, dim=0):
super(DataParallelCriterion, self).__init__(module, device_ids, output_device, dim)
def scatter(self, inputs, kwargs, device_ids):
return scatter_kwargs(inputs, kwargs, device_ids, dim=self.dim)
def forward(self, inputs, *targets, gathered=True, **kwargs):
# input should be already scatterd
# scattering the targets instead
if gathered:
if isinstance(inputs, (list, tuple)):
inputs, _ = self.scatter(inputs, kwargs, self.device_ids)
else:
inputs, _ = self.scatter([inputs], kwargs, self.device_ids)
# inputs = tuple(inputs_per_gpu[0] for inputs_per_gpu in inputs)
if not self.device_ids:
return self.module(inputs, *targets, **kwargs)
targets, kwargs = self.scatter(targets, kwargs, self.device_ids)
if len(self.device_ids) == 1:
return self.module(inputs[0], *targets[0], **kwargs[0])
replicas = self.replicate(self.module, self.device_ids[:len(inputs)])
# targets = tuple(targets_per_gpu[0] for targets_per_gpu in targets)
outputs = _criterion_parallel_apply(replicas, inputs, targets, kwargs)
return Reduce.apply(*outputs) / len(outputs)
def _criterion_parallel_apply(modules, inputs, targets, kwargs_tup=None, devices=None):
assert len(modules) == len(inputs)
assert len(targets) == len(inputs)
if kwargs_tup:
assert len(modules) == len(kwargs_tup)
else:
kwargs_tup = ({},) * len(modules)
if devices is not None:
assert len(modules) == len(devices)
else:
devices = [None] * len(modules)
lock = threading.Lock()
results = {}
if torch_ver != "0.3":
grad_enabled = torch.is_grad_enabled()
def _worker(i, module, input, target, kwargs, device=None):
if torch_ver != "0.3":
torch.set_grad_enabled(grad_enabled)
if device is None:
device = get_a_var(input).get_device()
try:
with torch.cuda.device(device):
output = module(input, *target, **kwargs)
with lock:
results[i] = output
except Exception as e:
with lock:
results[i] = e
if len(modules) > 1:
threads = [threading.Thread(target=_worker,
args=(i, module, input, target,
kwargs, device),)
for i, (module, input, target, kwargs, device) in
enumerate(zip(modules, inputs, targets, kwargs_tup, devices))]
for thread in threads:
thread.start()
for thread in threads:
thread.join()
else:
_worker(0, modules[0], inputs[0], targets[0], kwargs_tup[0], devices[0])
outputs = []
for i in range(len(inputs)):
output = results[i]
if isinstance(output, Exception):
raise output
outputs.append(output)
return outputs
###########################################################################
# Adapted from Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
#
class CallbackContext(object):
pass
def execute_replication_callbacks(modules):
"""
Execute an replication callback `__data_parallel_replicate__` on each module created
by original replication.
The callback will be invoked with arguments `__data_parallel_replicate__(ctx, copy_id)`
Note that, as all modules are isomorphism, we assign each sub-module with a context
(shared among multiple copies of this module on different devices).
Through this context, different copies can share some information.
We guarantee that the callback on the master copy (the first copy) will be called ahead
of calling the callback of any slave copies.
"""
master_copy = modules[0]
nr_modules = len(list(master_copy.modules()))
ctxs = [CallbackContext() for _ in range(nr_modules)]
for i, module in enumerate(modules):
for j, m in enumerate(module.modules()):
if hasattr(m, '__data_parallel_replicate__'):
m.__data_parallel_replicate__(ctxs[j], i)
def patch_replication_callback(data_parallel):
"""
Monkey-patch an existing `DataParallel` object. Add the replication callback.
Useful when you have customized `DataParallel` implementation.
Examples:
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallel(sync_bn, device_ids=[0, 1])
> patch_replication_callback(sync_bn)
# this is equivalent to
> sync_bn = SynchronizedBatchNorm1d(10, eps=1e-5, affine=False)
> sync_bn = DataParallelWithCallback(sync_bn, device_ids=[0, 1])
"""
assert isinstance(data_parallel, DataParallel)
old_replicate = data_parallel.replicate
@functools.wraps(old_replicate)
def new_replicate(module, device_ids):
modules = old_replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules
data_parallel.replicate = new_replicate
| [
"hsfzxjy@gmail.com"
] | hsfzxjy@gmail.com |
ba22a5ee77e66e4a5f2402afd73b5a73be691250 | 0604b216f171189423d0d803adbbcb71f5e8f695 | /module_random2.py | 00cb356511afe16a6ae69c70d95a226d66fdc687 | [] | no_license | rahulraju69/Python-IntershipFiles | e97d95a3f8cb871c1d818d539447df3632791a04 | 432dedf1470026165ea270aa56b123d8c0bd35a7 | refs/heads/master | 2023-03-08T07:48:23.424756 | 2021-01-31T09:53:59 | 2021-01-31T09:53:59 | 334,621,174 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 93 | py | #shuffle
import random as r
fruits=["apple","mango","grapes"]
r.shuffle(fruits)
print(fruits) | [
"rahulraju9550@gmail.com"
] | rahulraju9550@gmail.com |
cc95e675ce9006d3e9f7d28cffe4c7ef20978ece | e024cc2f51d2c9104a514f3f1a77c5cabbe7691a | /examplePandas.py | c717fed2795c5e5ce4b716fd2779e9a249e1c041 | [] | no_license | wilsonsk/Machine-Learning-for-Stock-Trading | 1818f144df02e69ce3e29fe1eb576675d512324a | bf5a36942e0f39e6c6d1c521bb3532e7eb82b669 | refs/heads/master | 2021-06-08T18:47:35.787532 | 2016-09-23T05:13:22 | 2016-09-23T05:13:22 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 266 | py | import pandas as pd
def test_run():
df = pd.read_csv("data/AAPL.csv");
print df #print entire dataframe
#print df.head() -- print first 5 rows
#print df.tail() -- print last 5 rows
#print df.tail(n) -- print last n rows
if __name__ == "__main__":
test_run()
| [
"root@localhost.localdomain"
] | root@localhost.localdomain |
9f5c8c7fb2e6862eaa12b20003eb6fd44d644b47 | b8fb9ab808e092053c78d114f75ffbde5aa5c3c4 | /py_core/particle.py | b1cf158c82a89abfb65086f0075793500ee81896 | [] | no_license | grburgess/pyjnu | 3150f73b03323b7b1192d5da668bf066eca66007 | d83e39bbeee774b3b6084728757f69d11ce69856 | refs/heads/master | 2018-11-27T18:15:51.362936 | 2018-09-17T12:56:14 | 2018-09-17T12:56:14 | 124,082,249 | 0 | 2 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | """
particle.py
Authors:
-Martina Karl
-Stephan Meighen-Berger
Deals with the different interactions and particles
of the models.
"""
import numpy as np
from config import config
from constants import phys_const
from math import sqrt
from logger import Logger
class particle(Logger):
"""
class: particle
Class to create particles (the fluxes)
Parameters:
-None
Returns:
-None
"""
def __init__(self, PDG_ID):
"""
function: __init__
Function to initialize the instance.
Parameters:
-str PDG_ID:
The PDG_ID of the particle
Returns:
-None
"""
self.logger.info('Creating particle ' + PDG_ID)
self.mass = phys_const['mass_' + PDG_ID]
self.emin = config['emin_' + PDG_ID]
self.emax = config['emax_' + PDG_ID]
self.size = config['grid_' + PDG_ID]
self.step = np.exp(np.log(self.emax / self.emin) / self.size)
self.e_grid = np.logspace(np.log(self.emin), np.log(self.emax),
self.size, base=np.e, endpoint=False)
self.e_borders = self.e_grid * sqrt(self.step)
# First position in the borders
self.e_borders = np.insert(self.e_borders, 0,
self.emin / sqrt(self.step))
self.e_diff = np.diff(self.e_borders)
self.flux = {}
self.dflux = {}
self.logger.info('Finished particle ' + PDG_ID)
| [
"theo.glauch@tum.de"
] | theo.glauch@tum.de |
294428420539f48b42712835aa446ba29b706061 | 60096eba428275a28ab53d364aef0b9bc29e71c8 | /hris/models.py | 9a2b067dfbdab5351c3fedc2181e89d2624e2c8f | [] | no_license | RobusGauli/hris_new | 30ef8d17aceceb5f6c8f69f65df508228cb31f33 | 634f18d162310df9331543f7a877cac619ee1622 | refs/heads/master | 2021-01-19T21:55:39.279378 | 2017-04-29T04:32:38 | 2017-04-29T04:32:38 | 88,724,501 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 15,787 | py | from sqlalchemy import (
Column,
String,
Integer,
ForeignKey,
Text,
Enum,
CheckConstraint,
DateTime,
func,
Date,
Float,
Boolean
)
#default
#onupdate
from psycopg2 import IntegrityError
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker, relationship
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Sequence
from hris import Base
class User(Base):
__tablename__ = 'users'
id = Column(Integer, primary_key=True, autoincrement=True)
user_name = Column(String(20), nullable=False, unique=True)
password = Column(String, nullable=False)
access_token = Column(String)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
created_by = Column(String(20))
updated_by = Column(String(20))
role_id = Column(Integer, ForeignKey('roles.id'))
activate = Column(Boolean, default=True)
del_flag = Column(Boolean, default=False)
#employee_id
password_changed = Column(Boolean, default=False)
#relationship
role = relationship('Role', back_populates='users')
#one to one with employees
employee = relationship('Employee', uselist=False, back_populates='user')
def to_dict(self):
data = {
'user_name' : self.user_name if self.user_name else '',
'role_id' : self.role_id if self.role_id else '',
'employee_data' : self.employee.to_dict() if self.employee else {},
'id' : self.id if self.id else '',
'role_name' : self.role.role_type
}
return data
class Role(Base):
__tablename__ = 'roles'
id = Column(Integer, primary_key=True, autoincrement=True)
role_type = Column(String, unique=True, nullable=False)
role_code = Column(String(20), unique=True, nullable=False)
role_type_display_name = Column(String(200), nullable=False)
activate = Column(Boolean, default=True)
del_flag = Column(Boolean, default=False)
agency_management_perm = Column(Enum('N', 'R', 'W', 'E', name='amp'), default='N')
division_management_perm = Column(Enum('N', 'R', 'W', 'E', name='dmp'), default='N')
agency_emp_perm = Column(Enum('N', 'R', 'W', 'E', name='aep'), default='N')
division_emp_perm = Column(Enum('N', 'R', 'W', 'E', name='dep'), default='N')
company_management_perm = Column(Enum('N', 'R', 'W', 'E', name='cmp'), default='N')
config_management_perm = Column(Enum('N', 'R', 'W', 'E', name='comp'), default='N')
read_management_perm = Column(Enum('N', 'A', 'B', 'D', 'O', name='rmp'), default='N')
user_management_perm = Column(Enum('N', 'R', 'W', 'E', name='ump'), default='N')
permission_eight = Column(Boolean, default=False)
permission_nine = Column(Boolean, default=False)
permission_ten = Column(Boolean, default=False)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
created_by = Column(String(20))
updated_by = Column(String(20))
#relationship
users = relationship('User', back_populates='role', cascade = 'all, delete, delete-orphan')
def to_dict(self):
role = {
'role_type' : self.role_type,
'id' : self.id,
'agency_management_perm' : self.agency_management_perm if self.agency_management_perm else 'N',
'activate' : self.activate if self.activate else True,
'division_management_perm' : self.division_management_perm if self.division_management_perm else 'N',
'agency_emp_perm' : self.agency_emp_perm if self.agency_emp_perm else 'N',
'division_emp_perm' : self.division_emp_perm if self.division_emp_perm else 'N',
'company_management_perm': self.company_management_perm if self.company_management_perm else 'N',
'config_management_perm': self.config_management_perm if self.config_management_perm else 'N',
'read_management_perm' : self.read_management_perm if self.read_management_perm else 'N',
'user_management_perm' : self.user_management_perm if self.user_management_perm else 'O',
}
return role
class CompanyDetail(Base):
__tablename__ = 'companydetail'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(30), unique=True)
description = Column(String(300))
currency_symbol = Column(String(2), unique=True)
is_prefix = Column(Boolean, default=False)
country = Column(String(30), nullable=False)
created_at = Column(DateTime, default=func.now())
updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
class Branch(Base):
__tablename__ = 'branches'
id = Column(Integer, primary_key=True, autoincrement=True)
is_branch = Column(Boolean, default=False)
facility_name = Column(String(40), nullable=False, unique=True)
facility_display_name = Column(String(40))
acitivate = Column(Boolean, default=True)
del_flag = Column(Boolean, default=False)
#foreignt keys
facility_type_id = Column(Integer, ForeignKey('facilitytypes.id'))
llg_id = Column(Integer, ForeignKey('llg.id'))
district_id = Column(Integer, ForeignKey('districts.id'))
province_id = Column(Integer, ForeignKey('provinces.id'))
region_id = Column(Integer, ForeignKey('regions.id'))
#relationship
facility_type = relationship('FacilityType', back_populates='branches')
llg = relationship('LLG', back_populates='branches')
district = relationship('District', back_populates='branches')
province = relationship('Province', back_populates='branches')
region = relationship('Region', back_populates='branches')
#realiationhsip
employees = relationship('Employee', back_populates='employee_branch', cascade='all, delete, delete-orphan')
class FacilityType(Base):
__tablename__ = 'facilitytypes'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(200), unique=True, nullable=False)
display_name = Column(String(200), nullable=False, unique=True)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='facility_type', cascade='all, delete, delete-orphan')
class LLG(Base):
__tablename__ = 'llg'
id = Column(Integer, primary_key=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='llg', cascade='all, delete, delete-orphan')
class District(Base):
__tablename__ = 'districts'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='district', cascade='all, delete, delete-orphan')
class Province(Base):
__tablename__ = 'provinces'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='province', cascade='all, delete, delete-orphan')
class Region(Base):
__tablename__ = 'regions'
id = Column(Integer, primary_key=True, autoincrement=True)
name = Column(String(100), unique=True, nullable=False)
display_name = Column(String(200), unique=True, nullable=False)
del_flag = Column(Boolean, default=False)
branches = relationship('Branch', back_populates='region', cascade='all, delete, delete-orphan')
#create an engine
#for employee
class EmployeeCategoryRank(Base):
    """Rank/seniority grouping that employee categories belong to."""
    __tablename__ = 'emp_cat_ranks'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(100), nullable=False, unique=True)
    display_name = Column(String(100), nullable=False, unique=True)
    activate = Column(Boolean, default=True)
    del_flag = Column(Boolean, default=False)  # soft-delete marker
    # one-to-many: categories under this rank
    emp_categories = relationship('EmployeeCategory', back_populates='emp_cat_rank', cascade='all, delete, delete-orphan')
class EmployeeCategory(Base):
    """Employee category; belongs to a rank and groups employees."""
    __tablename__ = 'emp_categories'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(50), nullable=False, unique=True)
    display_name = Column(String(50), nullable=False, unique=True)
    activate = Column(Boolean, default=True)
    emp_cat_rank_id = Column(Integer, ForeignKey('emp_cat_ranks.id'))
    # many-to-one: owning rank
    emp_cat_rank = relationship('EmployeeCategoryRank', back_populates='emp_categories')
    # one-to-many: employees assigned to this category
    employees = relationship('Employee', back_populates='employee_category', cascade='all, delete, delete-orphan')
#lets hardcord the grade of the employee
class EmployeeType(Base):
    """Employment-type lookup (referenced by Employee)."""
    __tablename__ = 'emp_types'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(100), nullable=False, unique=True)
    display_name = Column(String(100), nullable=False, unique=True)
    activate = Column(Boolean, default=True)
    # one-to-many: employees of this type
    employees = relationship('Employee', back_populates='employee_type', cascade='all, delete, delete-orphan')
class SalaryStep(Base):
    """Salary-step lookup; Employee.salary_step stores the value as a string."""
    __tablename__ = 'salarysteps'
    id = Column(Integer, primary_key=True, autoincrement=True)
    val = Column(String(4), nullable=False, unique=True)
    activate = Column(Boolean, default=True)
class Employee(Base):
    """Core employee record: identity, contact, employment data and lookups."""
    __tablename__ = 'employees'
    id = Column(Integer, primary_key=True, autoincrement=True)
    first_name = Column(String(40), nullable=False)
    middle_name = Column(String(40))
    last_name = Column(String(40), nullable=False)
    sex = Column(Enum('M', 'F', 'O', name='sex'), nullable=False)
    date_of_birth = Column(Date, nullable=False)
    address_one = Column(String(50), nullable=False)
    address_two = Column(String(50))
    village = Column(String(100))
    llg = Column(String(100))
    district = Column(String(100))
    province = Column(String(100))
    region = Column(String(100))
    country = Column(String(40))
    email_address = Column(String(100), unique=True)
    contact_number = Column(String(30), unique=True)
    alt_contact_number = Column(String(30), unique=True)
    age = Column(Integer, nullable=False)
    retirement_age = Column(Integer, nullable=False, default=50)
    # NOTE(review): keeps the original spelling "employement_number"; renaming
    # would require a DB migration plus caller updates.
    employement_number = Column(String(20), unique=True)
    salary_step = Column(String(6))
    date_of_commencement = Column(Date)
    contract_end_date = Column(Date)
    activate = Column(Boolean, default=True)
    # soft-delete marker
    del_flag = Column(Boolean, default=False)
    created_at = Column(DateTime, default=func.now())
    updated_at = Column(DateTime, default=func.now(), onupdate=func.now())
    created_by = Column(String(50))
    updated_by = Column(String(50))
    photo = Column(String(500), unique=True)
    document = Column(String(500), unique=True)
    is_branch = Column(Boolean, nullable=False, default=True)
    # branch the employee is posted to
    employee_branch_id = Column(Integer, ForeignKey('branches.id'), nullable=False)
    # many-to-one: posting branch
    employee_branch = relationship('Branch', back_populates='employees')
    employee_type_id = Column(Integer, ForeignKey('emp_types.id'), nullable=False)
    employee_category_id = Column(Integer, ForeignKey('emp_categories.id'), nullable=False)
    # one-to-one with the users table (login account)
    user_id = Column(Integer, ForeignKey('users.id'), unique=True)
    user = relationship('User', back_populates='employee')
    # one-to-one with the employee_extra table
    employee_extra = relationship('EmployeeExtra', uselist=False, back_populates='employee')
    # many-to-one lookups
    employee_type = relationship('EmployeeType', back_populates='employees')
    employee_category = relationship('EmployeeCategory', back_populates='employees')
    # one-to-many collections
    qualifications = relationship('Qualification', back_populates='employee', cascade='all, delete, delete-orphan')
    certifications = relationship('Certification', back_populates='employee', cascade='all, delete, delete-orphan')
    trainings = relationship('Training', back_populates='employee', cascade='all, delete, delete-orphan')
    def to_dict(self):
        """Return a small summary dict of the record; missing values become ''."""
        data = {
            'employement_number' : self.employement_number if self.employement_number else '',
            'first_name' : self.first_name if self.first_name else '',
            'middle_name' : self.middle_name if self.middle_name else '',
            'last_name' : self.last_name if self.last_name else '',
            'address_one' : self.address_one if self.address_one else '',
            'contact_number' : self.contact_number if self.contact_number else '',
            'country' : self.country if self.country else '',
            'id' : self.id if self.id else ''
        }
        return data
class EmployeeExtra(Base):
    """Optional extra details for an employee (referee and family info); one-to-one with Employee."""
    __tablename__ = 'employee_extra'
    id = Column(Integer, primary_key=True, autoincrement=True)
    employee_id = Column(Integer, ForeignKey('employees.id'), unique=True)
    ref_name = Column(String(40))
    ref_address = Column(String(40))
    ref_contact_number = Column(String(20))
    emp_father_name = Column(String(40))
    emp_mother_name = Column(String(40))
    emp_single = Column(Boolean, default=True)
    emp_wife_name = Column(String(40))
    emp_num_of_children = Column(Integer)
    del_flag = Column(Boolean, default=False)  # soft-delete marker
    # one-to-one back-reference
    employee = relationship('Employee', back_populates='employee_extra')
class Qualification(Base):
    """An academic qualification held by an employee (many per employee)."""
    __tablename__ = 'qualifications'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(60))
    institute_name = Column(String(100))
    city = Column(String(30))
    state = Column(String(30))
    province = Column(String(30))
    country = Column(String(40))
    start_date = Column(Date)
    end_date = Column(Date)
    del_flag = Column(Boolean, default=False)  # soft-delete marker
    employee_id = Column(Integer, ForeignKey('employees.id'))
    # many-to-one back-reference
    employee = relationship('Employee', back_populates='qualifications')
class Certification(Base):
    """A professional registration/certification held by an employee."""
    __tablename__ = 'certifications'
    id = Column(Integer, primary_key=True, autoincrement=True)
    registration_number = Column(String(40), nullable=False, unique=True)
    regulatory_body = Column(String(40), nullable=False)
    registration_type = Column(String(40))
    last_renewal_date = Column(Date)
    expiry_date = Column(Date)
    del_flag = Column(Boolean, default=False)  # soft-delete marker
    employee_id = Column(Integer, ForeignKey('employees.id'))
    # many-to-one back-reference
    employee = relationship('Employee', back_populates='certifications')
class Training(Base):
    """A training course attended by an employee (many per employee)."""
    __tablename__ = 'trainings'
    id = Column(Integer, primary_key=True, autoincrement=True)
    name = Column(String(200), nullable=False)
    organiser_name = Column(String(200))
    funding_source = Column(String(200))
    duration = Column(String(30))
    # NOTE(review): original spelling "institue" retained (schema compatibility)
    institue = Column(String(50))
    city = Column(String(50))
    state = Column(String(50))
    province = Column(String(50))
    country = Column(String(50))
    start_date = Column(Date)
    end_date = Column(Date)
    del_flag = Column(Boolean, default=False)  # soft-delete marker
    employee_id = Column(Integer, ForeignKey('employees.id'))
    employee = relationship('Employee', back_populates='trainings')
| [
"user@Users-MacBook-Air.local"
] | user@Users-MacBook-Air.local |
812e73a18219a81e50ee782b3b969d04ed5bd39d | a1574e07d5196e2a5c27546efcf4e6096aa549a0 | /Labs/Lab4/Lab9Ex7.py | 1a4e195affb205921f0054bef4b9e5d199bd42d2 | [] | no_license | JLevins189/Python | 722b4733f0aea6cbace5989d7cad2debd0d5e60b | fce6d0879761203fc020aeaefab56187f343decc | refs/heads/main | 2023-04-10T20:09:50.779812 | 2021-04-15T12:44:07 | 2021-04-15T12:44:07 | 358,255,377 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 255 | py | def stringop(my_str1):
str = ""
index = 0
counter = 0
str= ""
for counter in range(len(my_str1)):
if counter % 2 == 0:
str += my_str1[counter]
print(str)
my_str1 = input("Input a string")
stringop(my_str1)
| [
"jacklevins@hotmail.com"
] | jacklevins@hotmail.com |
e9dc6522d084ce0ed69299a6b78fa34d109f82ab | 6b5717887575d3b122cdf69ca540f83e6d3ebcd9 | /callbacks.py | f272ba3df5957d697bb9df82755be342c5077ac7 | [] | no_license | sagerpascal/KI1_Lab1_Reinforcment-Learning | 409a8c397702ed16e17f358adc0f818c7355b78f | ce6c6e9b3482cc81ef528c6948b36e52c86380e3 | refs/heads/master | 2022-02-24T05:02:52.579187 | 2019-10-20T11:49:53 | 2019-10-20T11:49:53 | 215,561,248 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,212 | py | # Keras-RL ruft diese Funktionen auf nach jeder Episode (Calbacks)
#
from os.path import exists
import csv
import numpy as np
import matplotlib.pyplot as plt
from rl.callbacks import Callback, TestLogger
class KerasCallbackLogger(TestLogger):
def on_episode_end(self, episode, logs):
# Callback von Keras-RL nach jeder Episode
grid = self.env.get_board()
print('Episode: ' + str(episode + 1) + 'Max Tile: ' + str(np.amax(grid)) + ' Punkte Episode: ' + str(
logs['episode_reward']) + 'Steps: ' + str(logs['nb_steps']))
print("Grid am Ende: \n{0}\n".format(grid))
class TrainEpisodeLogger2048(Callback):
    """Training logger: draws learning-curve plots and appends per-episode stats to a CSV file."""
    def __init__(self, filePath):
        # per-episode buffers, keyed by episode number
        self.observations = {}
        self.rewards = {}
        self.max_tile = {}
        self.step = 0
        # accumulated history across the whole run
        self.episodes = []
        self.max_tiles = []
        self.episodes_rewards = []
        # scatter plots: max tile / reward per episode
        self.fig_max_tile = plt.figure()
        self.ax1 = self.fig_max_tile.add_subplot(1, 1, 1)
        self.fig_reward = plt.figure()
        self.ax2 = self.fig_reward.add_subplot(1, 1, 1)
        # running means, updated every nb_episodes_for_mean episodes
        self.max_tiles_means = 0
        self.episodes_rewards_means = 0
        self.fig_max_tile_mean = plt.figure()
        self.ax3 = self.fig_max_tile_mean.add_subplot(1, 1, 1)
        self.fig_reward_mean = plt.figure()
        self.ax4 = self.fig_reward_mean.add_subplot(1, 1, 1)
        self.nb_episodes_for_mean = 50
        self.episode_counter = 0
        # CSV file: append if it already exists, otherwise write a fresh header
        if exists(filePath):
            csv_file = open(filePath, "a")  # a = append
            self.csv_writer = csv.writer(csv_file, delimiter=',')
        else:
            csv_file = open(filePath, "w")  # w = write (clear and restart)
            self.csv_writer = csv.writer(csv_file, delimiter=',')
            headers = ['episode', 'episode_steps', 'episode_reward', 'max_tile']
            self.csv_writer.writerow(headers)
    def on_episode_begin(self, episode, logs):
        # Reset the per-episode buffers (called by Keras-RL).
        self.observations[episode] = []
        self.rewards[episode] = []
        self.max_tile[episode] = 0
    def on_episode_end(self, episode, logs):
        # Print stats, write the CSV row and refresh the plots (called by Keras-RL).
        self.episode_counter += 1
        self.episodes = np.append(self.episodes, episode + 1)
        self.max_tiles = np.append(self.max_tiles, self.max_tile[episode])
        self.episodes_rewards = np.append(self.episodes_rewards, np.sum(self.rewards[episode]))
        print('Episode: ' + str(episode + 1) + 'Episode Steps: ' +
              str(len(self.observations[episode])) + 'Max Tile: ' + str(self.max_tiles[-1]) + ' Punkte Episode: ' + str(
            self.episodes_rewards[-1]))
        # Append one CSV row for this episode.
        self.csv_writer.writerow(
            (episode + 1, len(self.observations[episode]), self.episodes_rewards[-1], self.max_tiles[-1]))
        # Rolling-mean plots, refreshed every nb_episodes_for_mean episodes.
        if self.episode_counter % self.nb_episodes_for_mean == 0:
            self.max_tiles_means = np.append(self.max_tiles_means, np.mean(self.max_tiles[-self.nb_episodes_for_mean:]))
            self.fig_max_tile_mean.clear()
            plt.figure(self.fig_max_tile_mean.number)
            plt.plot(np.arange(0, self.episode_counter + self.nb_episodes_for_mean, self.nb_episodes_for_mean), self.max_tiles_means)
            plt.title("Höchster Block (in den letzten {} Episoden)".format(self.nb_episodes_for_mean))
            plt.xlabel("Episode")
            plt.ylabel("Durchschn. höchster Block")
            plt.pause(0.01)
            self.episodes_rewards_means = np.append(self.episodes_rewards_means, np.mean(self.episodes_rewards[-self.nb_episodes_for_mean:]))
            self.fig_reward_mean.clear()
            plt.figure(self.fig_reward_mean.number)
            plt.plot(np.arange(0, self.episode_counter + self.nb_episodes_for_mean, self.nb_episodes_for_mean), self.episodes_rewards_means)
            plt.title("Punkte-Durchschnitt (in den letzten {} Episoden)".format(self.nb_episodes_for_mean))
            plt.xlabel("Episode")
            plt.ylabel("Punkte-Durchschnitt")
            plt.pause(0.01)
        # Per-episode scatter plots (max tile and reward).
        self.fig_max_tile.clear()
        plt.figure(self.fig_max_tile.number)
        plt.scatter(self.episodes, self.max_tiles, s=1)
        plt.title("Höchster Block pro Episode")
        plt.xlabel("Episode")
        plt.ylabel("Höchster Block")
        plt.pause(0.01)
        self.fig_reward.clear()
        plt.figure(self.fig_reward.number)
        plt.scatter(self.episodes, self.episodes_rewards, s=1)
        plt.title("Punkte pro Episode")
        plt.xlabel("Episode")
        plt.ylabel("Punkte")
        plt.pause(0.01)
        # Free the per-episode buffers.
        del self.observations[episode]
        del self.rewards[episode]
        del self.max_tile[episode]
    def on_step_end(self, step, logs):
        # Update the per-step statistics (called by Keras-RL after every step).
        episode = logs['episode']
        self.observations[episode].append(logs['observation'])
        self.rewards[episode].append(logs['reward'])
        self.max_tile[episode] = logs['info']['max_tile']
        self.step += 1
| [
"sagerpa1@students.zhaw.ch"
] | sagerpa1@students.zhaw.ch |
7fa4bc06e2cbd7ed79a8af35e751930167b113c2 | a6adc78bddbf4a7c01327ed793e2108a2cfdd825 | /profiles/views.py | ec1ebbb178f28adb7ca0c1bc20ede5e0ff5d841e | [] | no_license | yugeshnukala/Twitter-Clone | 434f6c9a8dc073c732a306ad6afd2af70f7c68d0 | 5bb2b4245f2aa9cfdd0efaa352c20b414a744631 | refs/heads/master | 2022-12-07T18:00:12.494723 | 2020-08-21T06:03:20 | 2020-08-21T06:03:20 | 288,428,191 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,501 | py | from django.http import Http404
from django.shortcuts import render, redirect
from .forms import ProfileForm
from .models import Profile
def profile_update_view(request, *args, **kwargs):
    """Let the logged-in user edit their name/e-mail (User) plus Profile fields."""
    if not request.user.is_authenticated: # is_authenticated()
        return redirect("/login?next=/profile/update")
    user = request.user
    # Seed the form with current User fields; Profile fields come from instance=.
    user_data = {
        "first_name": user.first_name,
        "last_name": user.last_name,
        "email": user.email
    }
    my_profile = user.profile
    form = ProfileForm(request.POST or None, instance=my_profile, initial=user_data)
    if form.is_valid():
        # Save the Profile and mirror name/e-mail back onto the User row.
        profile_obj = form.save(commit=False)
        first_name = form.cleaned_data.get('first_name')
        last_name = form.cleaned_data.get('last_name')
        email = form.cleaned_data.get('email')
        user.first_name = first_name
        user.last_name = last_name
        user.email = email
        user.save()
        profile_obj.save()
    # NOTE(review): re-renders the form after a successful POST instead of
    # redirecting (no POST/redirect/GET) — confirm this is intended.
    context = {
        "form": form,
        "btn_label": "Save",
        "title": "Update Profile"
    }
    return render(request, "profiles/form.html", context)
def profile_detail_view(request, username, *args, **kwargs):
    """Render the public profile page for ``username``; 404 when none exists."""
    matches = Profile.objects.filter(user__username=username)
    if not matches.exists():
        raise Http404
    return render(
        request,
        "profiles/detail.html",
        {
            "username": username,
            "profile": matches.first(),
        },
    )
"yugeshnukala95@gmail.com"
] | yugeshnukala95@gmail.com |
6b2dc4c4ace54c42df53fad4d1201457c5f52c49 | 881041fab1b4d05f1c5371efed2f9276037eb609 | /tasks/where-civilian-complaints-were-reported-2005-2009/depositor.py | cfc1f38a64c3ca6b8dd165f0179f14f18bf8bf97 | [] | no_license | ResidentMario/urban-physiology-nyc-catalog | b568f3b6ee1a887a50c4df23c488f50c92e30625 | cefbc799f898f6cdf24d0a0ef6c9cd13c76fb05c | refs/heads/master | 2021-01-02T22:43:09.073952 | 2017-08-06T18:27:22 | 2017-08-06T18:27:22 | 99,377,500 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 408 | py | import requests
# Download the dataset as CSV from the NYC open-data endpoint.
r = requests.get("https://data.cityofnewyork.us/api/views/wqr5-zmgj/rows.csv?accessType=DOWNLOAD")
# NOTE(review): no status check — a failed request would write an error page to disk.
with open("/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/where-civilian-complaints-were-reported-2005-2009/data.csv", "wb") as f:
    f.write(r.content)
# Output paths consumed downstream — presumably by the catalog pipeline; machine-specific absolute path.
outputs = ["/home/alex/Desktop/urban-physiology-nyc-catalog/catalog/where-civilian-complaints-were-reported-2005-2009/data.csv"]
| [
"aleksey.bilogur@gmail.com"
] | aleksey.bilogur@gmail.com |
3b9604a56f33fc339e8f80bd46f0bfc0fc240d20 | 30c0bafd9d0e8c82608510eb4f6bf312c6cf9018 | /bayes.py | 5d075254b61dff81f3e8e9b535cf20ec0fe2706b | [] | no_license | 2233niyubao/ML | 5013039f5b3163ba7a4dfcd6748a7db76decf36f | cac9769dd46d7e582ecf1556d65d8ad0f5da2990 | refs/heads/main | 2023-02-26T11:56:05.366264 | 2021-02-03T15:52:09 | 2021-02-03T15:52:09 | 334,967,527 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 805 | py | def loadDataSet():
    # Six toy tokenised "posts" used throughout the naive-Bayes example.
    postingList=[['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                 ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                 ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                 ['stop', 'posting', 'stupid', 'worthless', 'grabage'],
                 ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                 ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    # Labels for the posts above; presumably 1 marks abusive posts — TODO confirm.
    classVec = [0, 1, 0, 1, 0, 1]
    return postingList, classVec
def createVocabList(dataSet):
    """Return a list of the unique tokens appearing across all documents."""
    vocabulary = set()
    for document in dataSet:
        vocabulary |= set(document)  # accumulate the union of all tokens
    return list(vocabulary)
def setOfWords2Vec(vocabList, inputSet):
    """Return a 0/1 vector over vocabList marking which words occur in inputSet.

    Bug fix: the original built the zero vector but never set any entry, so
    every document mapped to the all-zero vector. Words absent from the
    vocabulary are silently ignored.
    """
    returnVec = [0]*len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1
    return returnVec
| [
"2233niyubao@163.com"
] | 2233niyubao@163.com |
216af594580d96800f9747a8650c7a4f5c81e89f | 88ba19b3303c112a424720106a7f7fde615757b5 | /03-data_manipulation_with_pandas/01-transforming_data/sorting_rows1.py | 0939c1757697add7f2c7c4dbd665fad67ebd8b1c | [] | no_license | mitchisrael88/Data_Camp | 4100f5904c62055f619281a424a580b5b2b0cbc1 | 14356e221f614424a332bbc46459917bb6f99d8a | refs/heads/master | 2022-10-22T18:35:39.163613 | 2020-06-16T23:37:41 | 2020-06-16T23:37:41 | 263,859,926 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,368 | py | Python 3.7.4 (tags/v3.7.4:e09359112e, Jul 8 2019, 20:34:20) [MSC v.1916 64 bit (AMD64)] on win32
Type "help", "copyright", "credits" or "license()" for more information.
>>> # Sort homelessness by individual
homelessness_ind = homelessness.sort_values("individuals")
# Print the top few rows
print(homelessness_ind.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by descending family members
homelessness_fam = homelessness.sort_values("family_members", ascending=False)
# Print the top few rows
print(homelessness_fam.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by descending family members
homelessness_fam = homelessness.sort_values("family_members", ascending=False)
# Print the top few rows
print(homelessness_fam.head())
SyntaxError: multiple statements found while compiling a single statement
>>>
=============================== RESTART: Shell ===============================
>>> # Sort homelessness by individual
homelessness_ind = homelessness.sort_values("individuals")
# Print the top few rows
print(homelessness_ind.head())
| [
"noreply@github.com"
] | mitchisrael88.noreply@github.com |
170a9f6840626ccbdc39ec724bedd10138df1fc0 | 531c47c15b97cbcb263ec86821d7f258c81c0aaf | /sdk/security/azure-mgmt-security/azure/mgmt/security/_configuration.py | 9aa2b7aa11ce32d405db56ca4db44791e423a5c6 | [
"LicenseRef-scancode-generic-cla",
"LGPL-2.1-or-later",
"MIT"
] | permissive | YijunXieMS/azure-sdk-for-python | be364d3b88204fd3c7d223df23756386ff7a3361 | f779de8e53dbec033f98f976284e6d9491fd60b3 | refs/heads/master | 2021-07-15T18:06:28.748507 | 2020-09-04T15:48:52 | 2020-09-04T15:48:52 | 205,457,088 | 1 | 2 | MIT | 2020-06-16T16:38:15 | 2019-08-30T21:08:55 | Python | UTF-8 | Python | false | false | 2,145 | py | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrestazure import AzureConfiguration
from .version import VERSION
class SecurityCenterConfiguration(AzureConfiguration):
    """Configuration for SecurityCenter.

    All parameters used to create this instance are saved as instance
    attributes.

    :param credentials: Credentials needed for the client to connect to Azure.
    :type credentials: :mod:`A msrestazure Credentials
     object<msrestazure.azure_active_directory>`
    :param subscription_id: Azure subscription ID
    :type subscription_id: str
    :param asc_location: The location where ASC stores the data of the
     subscription. can be retrieved from Get locations
    :type asc_location: str
    :param str base_url: Service URL
    """

    def __init__(
            self, credentials, subscription_id, asc_location, base_url=None):
        # Reject missing required parameters, keeping the original error text
        # and check order (credentials first, then subscription, then location).
        required = (('credentials', credentials),
                    ('subscription_id', subscription_id),
                    ('asc_location', asc_location))
        for param_name, param_value in required:
            if param_value is None:
                raise ValueError("Parameter '{0}' must not be None.".format(param_name))

        super(SecurityCenterConfiguration, self).__init__(
            base_url or 'https://management.azure.com')

        # Starting Autorest.Python 4.0.64, make connection pool activated by default
        self.keep_alive = True
        self.add_user_agent('azure-mgmt-security/{}'.format(VERSION))
        self.add_user_agent('Azure-SDK-For-Python')

        self.credentials = credentials
        self.subscription_id = subscription_id
        self.asc_location = asc_location
| [
"zikalino@microsoft.com"
] | zikalino@microsoft.com |
d88df27b4f46d730bf923b059ebaf72aae112cda | 74d43a0204e18943aaddc0de02ebe22336707d3c | /剑指offer/第32题_1到n整数中1出现的次数.py | f42e6454195567f12e7197fc337047de6e4f36b5 | [] | no_license | gamersover/jianzhi_offer | 939876340779e1aae11dff3962eafa034fe0ba1f | c8e5075a80360063fecdc84ed26539167d1810a0 | refs/heads/master | 2020-04-05T10:23:39.348768 | 2019-03-01T03:38:32 | 2019-03-01T03:38:32 | 156,797,180 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 958 | py | # -*- coding:utf-8 -*-
"""
"Sword Pointing to Offer", problem 32.

Problem: given an integer n, count how many times the digit 1 appears in the
decimal representations of the integers 1..n.

Idea: count the 1s contributed by each digit position (units, tens, hundreds,
...). Take 22314 with the hundreds digit (3) as the current position: the part
in front is 22 and the part behind is 14.
- current digit > 1: the position contributes (front + 1) * place ones
- current digit == 1: it contributes front * place + behind + 1 ones
- current digit == 0: it contributes front * place ones
"""
def count_one(n):
    """Count how many times the digit 1 appears when writing 1..n in decimal."""
    total = 0
    place = 1  # digit position currently examined: 1, 10, 100, ...
    while n // place:
        # Split n around the current position: higher digits, the digit
        # itself, and the lower remainder (n % place).
        higher, digit = divmod(n // place, 10)
        lower = n % place
        if digit == 0:
            total += higher * place
        elif digit == 1:
            total += higher * place + lower + 1
        else:
            total += (higher + 1) * place
        place *= 10
    return total

# Smoke check: the digit 1 appears 21 times in 1..100.
print(count_one(100))
| [
"cmathking@gmail.com"
] | cmathking@gmail.com |
20ac5f604516793a8054d18c91702340e4a39b11 | 5eea575d3fc9b23f27747720228fdb27a8b9db6d | /scripts/plotting/newhistogram.py | 28d86f9b8ff0bd510eade9154c7eae97ae1774e9 | [] | no_license | bhofman/GroomRL | 932855db4fe5a81d19824f7759f0ca83b8db24bf | 0bca1277f36ee6d19f6e8b6b0e5b8ebb14aad6a8 | refs/heads/master | 2022-01-21T02:17:39.688419 | 2019-08-14T09:21:35 | 2019-08-14T09:21:35 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 21,447 | py | from __future__ import division, print_function
import numpy as np
from math import sqrt, floor, log, exp
import sys
# single-histogram binning & binning for x-axis in 2d-histograms
_default_bins = None
# binning for y-axis in 2d histograms
_default_bins_y = None
# output format for 2D case
_compact2D = False
def set_compact2D(value = True):
    """
    Sets whether 2D output is made compact (6 digits, just midpoints of x & y bins)
    """
    # The flag is module-global so all 2D histogram output routines share it.
    global _compact2D
    _compact2D = value
def set_default_bins(bins_x, bins_y = None):
    """
    Sets the default binning; if only one argument is provided the y binning
    (for 2d histograms) is set to the x binning (== single-axis histogram binning)
    """
    global _default_bins, _default_bins_y
    _default_bins = bins_x
    # default the y binning to the x binning when not given explicitly
    if (bins_y is None): _default_bins_y = bins_x
    else: _default_bins_y = bins_y
#----------------------------------------------------------------------
class Bins(object):
    """Abstract base for binnings.

    Subclasses must provide ``nbins`` plus ``ibin()``, ``xlo()``, ``xmid()``,
    ``xhi()`` and ``xedges()``; this base supplies formatting helpers and the
    outflow-padded edge array used by Histogram.add_series.
    """
    def string_from_ibin(self, ibin, format_string = "{} {} {}"):
        """returns a string describing this bin using the given format, which
        can take positional arguments (in the order xlo, xmid, xhi) or
        named arguments (xlo, xmid, xhi).
        """
        if (ibin < 0): return "underflow"
        # Bug fix: indices at or above nbins are overflow, not underflow.
        if (ibin >= self.nbins): return "overflow"
        xlo  = self.xlo (ibin)
        xmid = self.xmid(ibin)
        xhi  = self.xhi (ibin)
        return format_string.format(xlo, xmid, xhi, xlo=xlo, xmid=xmid, xhi=xhi)

    def string_from_x(self, x, number_format = "{} {} {}"):
        """Similar to string_from_ibin, but you supply the x value instead"""
        return self.string_from_ibin(self.ibin(x), number_format)

    def xedges_with_outflow(self):
        "Returns a numpy array with the bin edges, including outflow markers"
        # nbins+1 real edges, padded with -inf / +inf so np.histogram collects
        # the under/overflow in the first/last bin.
        edges = np.empty(self.nbins + 3)
        edges[1:-1] = self.xedges()
        edges[0] = -np.inf
        edges[-1] = np.inf
        return edges
#----------------------------------------------------------------------
class LinearBins(Bins):
    """Uniformly spaced bins covering [lo, hi]."""
    def __init__(self, lo, hi, dbin):
        # Round to the nearest whole number of bins, then recompute the exact
        # width so that the edges land precisely on lo and hi.
        self.nbins = int(abs((hi - lo) / dbin) + 0.5)
        self.dbin = (hi - lo) / self.nbins
        self.lo = lo
        self.hi = hi

    def ibin(self, xvalue):
        """Index of the bin containing xvalue (may fall outside 0..nbins-1)."""
        return int(floor((xvalue - self.lo) / self.dbin))

    def xlo(self, _ibin):
        return self.lo + _ibin * self.dbin

    def xmid(self, _ibin):
        return self.xvalue(_ibin)

    def xhi(self, _ibin):
        return self.lo + (_ibin + 1.0) * self.dbin

    def xvalue(self, _ibin):
        # midpoint of bin _ibin
        return self.lo + (_ibin + 0.5) * self.dbin

    def xvalues(self):
        # midpoints of all bins as a numpy array
        return self.lo + (np.arange(0, self.nbins) + 0.5) * self.dbin

    def xedges(self):
        "Returns a numpy array with the bin edges"
        return np.array([self.lo + ibin * self.dbin for ibin in range(0, self.nbins + 1)])

    def __str__(self):
        return "Linear bins from {} to {}, each of size {}".format(self.lo, self.hi, self.dbin)
#----------------------------------------------------------------------
class LogBins(Bins):
    """Bins spaced uniformly in the natural logarithm of x between lo and hi."""
    def __init__(self, lo, hi, dbin):
        """
        Create a logarithmic binning between lo and hi, where dbin is the
        bin size (in the natural logarithm of the variable)
        """
        # round to a whole number of bins, then recompute the exact log-width
        self.nbins = int(abs(log(hi/lo)/dbin) + 0.5)
        self.dbin = log(hi/lo)/self.nbins
        self.lo = lo
        self.hi = hi
    def ibin(self,xvalue):
        # identify the bin in the case where we have uniform log spacing
        return int(floor(log(xvalue / self.lo)/self.dbin))
    def xlo(self, _ibin):
        return self.lo *exp(_ibin*self.dbin)
    def xmid(self, _ibin):
        return self.xvalue(_ibin)
    def xhi(self, _ibin):
        return self.lo * exp((_ibin+1.0)*self.dbin)
    def xvalue(self, _ibin):
        # midpoint in log space (geometric-style centre of the bin)
        return self.lo * exp((_ibin+0.5)*self.dbin)
    def xvalues(self):
        return self.lo * np.exp((np.arange(0, self.nbins)+0.5)*self.dbin)
    def xedges(self):
        """Returns a numpy array with the bin edges.

        Bug fix: this method was missing, so Bins.xedges_with_outflow()
        (and therefore Histogram.add_series) raised AttributeError when
        used with logarithmic bins.
        """
        return self.lo * np.exp(np.arange(0, self.nbins+1)*self.dbin)
    def __str__(self):
        return "Logarithmic bins from {} to {}, each of logarithmic size {}".format(self.lo, self.hi, self.dbin)
#----------------------------------------------------------------------
class CustomBins(Bins):
    """Binning with explicitly supplied (possibly non-uniform) bin edges."""
    def __init__(self, bin_edges):
        self._bin_edges = np.array(bin_edges)
        self.nbins = len(self._bin_edges) - 1
        self.lo = self._bin_edges[ 0]
        self.hi = self._bin_edges[-1]
    def ibin(self,xvalue):
        # identify the bin in the case where we have explicit bin edges
        #
        # NB: programming this by hand gives a result that runs 10x
        # faster than using numpy's searchsorted.
        if (xvalue < self.lo): return -1
        if (xvalue > self.hi): return self.nbins
        # bisect to find the bin: invariant is edges[ilo] <= xvalue <= edges[ihi]
        ilo = 0
        ihi = self.nbins
        while (ihi - ilo != 1):
            imid = (ilo+ihi)//2
            if (xvalue > self._bin_edges[imid]): ilo = imid
            else : ihi = imid
        return ilo
        # --- this version is slow... ---
        #u = np.searchsorted(self._bin_edges, [xvalue])
        #return u[0]-1
    def xlo(self, _ibin):
        return self._bin_edges[_ibin]
    def xmid(self, _ibin):
        return 0.5 * (self._bin_edges[_ibin] + self._bin_edges[_ibin+1])
    def xhi(self, _ibin):
        return self._bin_edges[_ibin+1]
    def xvalue(self, _ibin):
        return self.xmid(_ibin)
    def xvalues(self):
        # midpoints of all bins, computed in one vectorised expression
        return 0.5 * (self._bin_edges[:-1] + self._bin_edges[1:])
    def xedges(self):
        "Returns a numpy array with the bin edges"
        return np.array(self._bin_edges)
    def __str__(self):
        return "CustomBins with edges at {}".format(self._bin_edges)
#----------------------------------------------------------------------
class HistogramBase(object):
    """Shared bin-handling logic for Histogram and ProfileHistogram.

    Subclasses must implement _init_contents(), which (re)allocates their data.
    """
    def __init__(self,bins):
        self._bins_locked = False
        self.set_bins(bins, False)
    def set_bins(self, bins=None, lock = True):
        """
        Sets the bins and resets all data to zero; if the lock argument is True
        then subsequent calls to this function will not change the bins or reset
        the contents
        """
        if (self._bins_locked): return self
        self._bins_locked = lock
        if (bins is None): self.bins = _default_bins
        else : self.bins = bins
        # one could end up in a situation (e.g. with a
        # hists["someName"].set_bins(...,...).add(...) call)
        # where the bins are not defined at this stage
        # (e.g. if default bins are empty)
        #
        # In that case, just return an incomplete object,
        # knowing there's a chance it will be set up properly
        # later...
        if (self.bins is None): return self
        # Expose the binning's coordinate helpers directly on the histogram.
        self.xvalues = self.bins.xvalues
        self.xvalue = self.bins.xvalue
        self.xhi = self.bins.xhi
        self.xlo = self.bins.xlo
        self.xmid = self.bins.xmid
        self.ibin = self.bins.ibin
        # this will need to be implemented in the main class
        # (not the base)
        self._init_contents()
        return self
#----------------------------------------------------------------------
class Histogram(HistogramBase):
    '''Object to contains a histogram
    '''
    def __init__(self, bins = None, name=None):
        '''Create a histogram with the binning as specified by bins (or the current default)'''
        super(Histogram,self).__init__(bins)
        self.name = name
    def _init_contents(self):
        # Per-bin contents plus under/overflow counters and the weighted
        # moment accumulators used for average()/stddev().
        self.underflow = 0.0
        self.overflow = 0.0
        self._contents = np.zeros(self.bins.nbins)
        self._nentries = 0.0
        self._sumwgt = 0.0
        self._sumxwgt = 0.0
        self._sumx2wgt = 0.0
    def add(self, xvalue, weight = 1):
        """
        Add an entry to the histogram.
        """
        _ibin = self.bins.ibin(xvalue)
        self._add_ibin(_ibin, weight)
        self._sumxwgt += xvalue * weight
        self._sumx2wgt += xvalue**2 * weight
    def add_series(self, series, weights = None, weight = 1.0):
        """
        Takes data (and optionally weights) in the form of an np array
        and add it to the histogram. This is (should be?) much faster than adding
        entries individually, because it makes use of the numpy's
        histogram routine.

        If a weights array is supplied, then weight must be 1
        """
        self._nentries += len(series)
        if (weights is None):
            # the +-inf padding in xedges_with_outflow makes count[0] the
            # underflow and count[-1] the overflow
            count, division = np.histogram(series, bins = self.bins.xedges_with_outflow())
            self._contents += weight * count[1:-1]
            self.underflow += weight * count[0]
            self.overflow += weight * count[-1]
            self._sumwgt += weight * len(series)
            self._sumxwgt += sum(series) * weight
            self._sumx2wgt += sum(series**2) * weight
        else:
            if (weight != 1.0): raise ValueError("weight was {} but should be 1.0 "
                                                 "when weights argument is supplied".format(weight))
            count, division = np.histogram(series, bins = self.bins.xedges_with_outflow(), weights=weights)
            self._contents += count[1:-1]
            self.underflow += count[0]
            self.overflow += count[-1]
            self._sumwgt += sum(weights)
            self._sumxwgt += sum(series * weights)
            self._sumx2wgt += sum(series**2) * weight
    def _add_ibin(self, _ibin, weight):
        # Route the weight to underflow / overflow / the in-range bin.
        if (_ibin < 0): self.underflow += weight
        elif (_ibin >= self.bins.nbins): self.overflow += weight
        else: self._contents[_ibin] += weight
        self._nentries += 1
        self._sumwgt += weight
    def average(self):
        # Weighted mean of the entries (0 when the histogram is empty).
        if (self._sumwgt != 0.0): return self._sumxwgt/self._sumwgt
        else: return 0.0
    def yvalues(self):
        return self._contents
    def error(self):
        # Statistical error on the mean; max(1, n-1) guards against n <= 1.
        return self.stddev()/sqrt(max(1,self._nentries-1))
    def stddev(self):
        if (self._sumwgt != 0.0):
            return sqrt(self._sumx2wgt/self._sumwgt - self.average()**2)
        else:
            return 0.0
    def __getitem__(self,i):
        return self._contents[i]
    def __str__(self, rescale=1.0):
        # NOTE(review): the extra rescale parameter is non-standard for
        # __str__; print(h) always uses rescale=1.0.
        output = ""
        if (self.name): output += "# histogram:{}\n".format(self.name)
        output += "# nentries = {}, avg = {}+-{}, stddev = {}, underflow = {}, overflow = {}\n".format(
            self._nentries, self.average(), self.error(), self.stddev(), self.underflow, self.overflow)
        for i in range(len(self._contents)):
            output += "{} {} {} {}\n".format(self.bins.xlo(i),
                                             self.bins.xmid(i),
                                             self.bins.xhi(i),
                                             self[i]*rescale)
        output +="\n"
        return output
#----------------------------------------------------------------------
class ProfileHistogram(HistogramBase):
    """Profile histogram: for each x bin, tracks the weighted mean/stddev of y."""
    def __init__(self, bins = None, name=None):
        '''Create a profile histogram with bins going from lo to hi with bin size dbin'''
        super(ProfileHistogram,self).__init__(bins)
        self.name = name
    def _init_contents(self):
        # Four parallel histograms accumulate the per-bin moments of y.
        self.weights = Histogram(self.bins, self.name)
        self.weights_times_y = Histogram(self.bins, self.name)
        self.weights_times_y2 = Histogram(self.bins, self.name)
        self.n_entries = Histogram(self.bins, self.name)
        self._total_n_entries = 0.0
    def add(self, xvalue, yvalue, weight = 1):
        """
        Add an entry to the profile histogram.
        """
        _ibin = self.bins.ibin(xvalue)
        self._add_ibin(_ibin, yvalue, weight)
    def _add_ibin(self, ibin, yvalue, weight = 1):
        self.weights .       _add_ibin (ibin, weight)
        self.weights_times_y._add_ibin (ibin, weight * yvalue)
        self.weights_times_y2._add_ibin (ibin, weight * yvalue**2)
        self.n_entries.      _add_ibin (ibin, 1.0)
        self._total_n_entries += 1.0
    def __str__(self):
        # prepare some shortcuts
        weights = self.weights.yvalues()
        weights_times_y = self.weights_times_y.yvalues()
        weights_times_y2 = self.weights_times_y2.yvalues()
        n_entries = self.n_entries.yvalues()
        # Compute per-bin mean/stddev/error; empty bins are protected from
        # division by zero by substituting a unit weight.
        average = weights_times_y / np.where(weights == 0, 1.0, weights)
        average2 = weights_times_y2 / np.where(weights == 0, 1.0, weights)
        stddev = np.sqrt(np.maximum(0, average2 - average**2))
        err = stddev / np.sqrt(np.maximum(n_entries - 1, 1))
        # then generate the output
        output = ""
        if (self.name): output += "# profileHistogram:{}\n".format(self.name)
        output += "# xlo xmid xhi average stddev err n_entries\n"
        for i in range(len(weights)):
            output += "{} {} {} {} {} {} {}\n".format(self.bins.xlo(i),
                                                      self.bins.xmid(i),
                                                      self.bins.xhi(i),
                                                      average[i], stddev[i],
                                                      err[i], n_entries[i])
        output +="\n"
        return output
#----------------------------------------------------------------------
class Histogram2D(object):
    '''A two-dimensional histogram.

    Entries outside the x or y range all go to a single "outflow" counter
    (there is no per-side under/overflow bookkeeping as in the 1-D class).
    '''
    def __init__(self, bins_x = None, bins_y = None, name=None):
        '''Create a 2d histogram with the binning as specified by bins_x and bins_y (or the current default)'''
        self.name = name
        self._bins_locked = False
        self.set_bins(bins_x, bins_y, False)
    def set_bins(self, bins_x = None, bins_y = None, lock = True):
        """
        Sets the bins and resets all data to zero; if the lock argument is True
        then subsequent calls to this function will not change the bins or reset
        the contents
        """
        if (self._bins_locked): return self
        self._bins_locked = lock
        if (bins_x is None): self.bins_x = _default_bins
        else: self.bins_x = bins_x
        if (bins_y is None): self.bins_y = _default_bins_y
        else: self.bins_y = bins_y
        # one could end up in a situation (e.g. with a
        # hists2D["someName"].set_bins(...,...).add(...) call)
        # where the bins are not defined at this stage.
        #
        # In that case, just return an incomplete object,
        # knowing there's a chance it will be set up properly
        # later...
        if (self.bins_x is None or self.bins_y is None): return self
        self.outflow = 0.0
        self._contents = np.zeros((self.bins_x.nbins, self.bins_y.nbins))
        self._nentries = 0.0
        self._sumwgt = 0.0
        # BUG FIX: average() reads _sumxwgt, but it was never initialised or
        # accumulated in this class, so calling average() always raised
        # AttributeError. Initialise it here and accumulate it in add().
        self._sumxwgt = 0.0
        # by returning self, the user can chain the calls, e.g.
        # hists2D["someName"].set_bins(...,...).add(...)
        return self
    def add(self, xvalue, yvalue, weight = 1):
        """Add one (x, y) entry with the given weight."""
        _ibin_x = self.bins_x.ibin(xvalue)
        _ibin_y = self.bins_y.ibin(yvalue)
        # Track sum(weight * x) so average() can report the weighted x mean
        # (consistent with the 1-D Histogram, out-of-range entries included).
        self._sumxwgt += weight * xvalue
        self._add_ibin(_ibin_x, _ibin_y, weight)
    def _add_ibin(self, _ibin_x, _ibin_y, weight):
        try:
            # watch out: numpy wraps negative indices around...
            # so raise an error that will take us to the overflow bin
            if (_ibin_x < 0 or _ibin_y < 0): raise IndexError
            self._contents[_ibin_x, _ibin_y] += weight
        except IndexError:
            self.outflow += weight
        self._nentries += 1
        self._sumwgt += weight
    def average(self):
        """Weighted mean of the x values, or 0.0 if nothing was filled."""
        if (self._sumwgt != 0.0): return self._sumxwgt/self._sumwgt
        else: return 0.0
    def zvalues(self):
        """Return the 2-D array of bin contents."""
        return self._contents
    def __getitem__(self, pos):
        # Indexed as h[ix, iy].
        i, j = pos
        return self._contents[i, j]
    def __str__(self, rescale=1.0):
        """Render as gnuplot-style text; layout depends on the module-level
        _compact2D flag (xmid-only triplets vs full bin-edge columns)."""
        output = ""
        if (self.name): output += "# histogram2d:{}\n".format(self.name)
        output += "# nentries = {}, sumwgt = {}, outflow = {}\n".format(
            self._nentries, self._sumwgt * rescale, self.outflow * rescale)
        if (_compact2D):
            for ix in range(self._contents.shape[0]):
                for iy in range(self._contents.shape[1]):
                    output += "{:.6g} {:.6g} {:.6g}\n".format(
                        self.bins_x.xmid(ix),
                        self.bins_y.xmid(iy),
                        self._contents[ix, iy]*rescale)
        else:
            for ix in range(self._contents.shape[0]):
                for iy in range(self._contents.shape[1]):
                    output += "{} {} {} {} {} {} {}\n".format(self.bins_x.xlo(ix),
                                                              self.bins_x.xmid(ix),
                                                              self.bins_x.xhi(ix),
                                                              self.bins_y.xlo(iy),
                                                              self.bins_y.xmid(iy),
                                                              self.bins_y.xhi(iy),
                                                              self._contents[ix, iy]*rescale)
            output +="\n"
        output +="\n"
        return output
#----------------------------------------------------------------------
class HistogramCollection(object):
    """Dictionary-like registry of histograms.

    Accessing a missing key lazily creates a histogram of the configured
    type, using the collection's default binning, with the key as its name.
    """
    def __init__(self, histogram_type = Histogram, bins = None):
        self._histogram_type = histogram_type
        self._default_bins = bins
    def __getitem__(self, item):
        try:
            return self.__dict__[item]
        except KeyError:
            created = self._histogram_type(bins=self._default_bins, name=item)
            self.__dict__[item] = created
            return created
    def set_default_bins(self, bins):
        self._default_bins = bins
    def keys(self):
        return self.__dict__.keys()
    def __str__(self):
        """
        Returns all histograms from the collection, without any normalisation.
        They are in alphabetical order of the keys.
        """
        internal = ("_histogram_type", "_default_bins")
        pieces = [str(self[key]) + "\n"
                  for key in sorted(self.keys()) if key not in internal]
        return "".join(pieces)
#----------------------------------------------------------------------
class Histogram2DCollection(object):
    """Dictionary-like registry of 2-D histograms.

    Accessing a missing key lazily creates a histogram of the configured
    type, using the collection's default x/y binning, named after the key.
    """
    def __init__(self, histogram_type = Histogram2D, bins_x = None, bins_y = None):
        self._histogram_type = histogram_type
        self.set_default_bins(bins_x, bins_y)
    def __getitem__(self, item):
        try:
            return self.__dict__[item]
        except KeyError:
            created = self._histogram_type(bins_x=self._default_bins_x,
                                           bins_y=self._default_bins_y,
                                           name=item)
            self.__dict__[item] = created
            return created
    def set_default_bins(self, bins_x = None, bins_y = None):
        self._default_bins_x = bins_x
        self._default_bins_y = bins_y
    def keys(self):
        return self.__dict__.keys()
    def __str__(self):
        """
        Returns all histograms from the collection, without any normalisation.
        They are in alphabetical order of the keys.
        """
        internal = ("_histogram_type", "_default_bins_x", "_default_bins_y")
        pieces = [str(self[key]) + "\n"
                  for key in sorted(self.keys()) if key not in internal]
        return "".join(pieces)
# Module-level default collections: a histogram springs into existence the
# first time it is accessed, e.g. hists["pt"].add(x).
hists = HistogramCollection()
profile_hists = HistogramCollection(ProfileHistogram)
hists2D = Histogram2DCollection()
#----------------------------------------------------------------------
# predefined objects
# for testing
def _run_tests():
    """Smoke test: fill and print one histogram of each kind."""
    # set_default_bins(LinearBins(-2.0, -1.0, 0.5))
    x_bins = LinearBins(0.0, 1.0, 0.5)
    y_bins = LinearBins(0.0, 4.0, 0.5)
    # The second set_bins call is a no-op: the first one locked the bins.
    hists["test"].set_bins(LinearBins(5.0, 10.0, 1.0)).add(7.2)
    hists["test"].set_bins(LinearBins(5.0, 10.0, 1.0)).add(8.2)
    print (hists)
    profile_hists["test"].set_bins(LogBins(5.0, 10.0, 0.2)).add(7.2, 2.0)
    profile_hists["test"].set_bins(LogBins(5.0, 10.0, 0.2)).add(7.2, 4.0)
    print (profile_hists)
    #hists2D.set_default_bins(bins_y = LinearBins(0.0, 4.0, 0.5))
    #set_default_bins()
    #h = Histogram2D(bins_y = LinearBins(0.0, 4.0, 0.5))
    hists2D["test"].set_bins(x_bins, y_bins).add(0.7,0.3)
    # (6.7, 0.3) lies outside x_bins, so it lands in the outflow counter.
    hists2D["test"].set_bins(x_bins, y_bins).add(6.7,0.3)
    print(hists2D["test"][1,0])
    print(hists2D)
# Allow running this module directly as a smoke test.
if __name__ == "__main__":
    _run_tests()
| [
"frederic.dreyer@cern.ch"
] | frederic.dreyer@cern.ch |
b9bc67f9b186b8fc4dd3eddf7ba732873a83cf1e | e68d23c2018cec1b3f47c96abf50108449a6404b | /src/ner/bilstm-crf/graph_builder.py | 6b5e7a4dc7b0d2009589631d0d974cef801c45fe | [] | no_license | sfu-natlang/neural-network-tagger | 2f82e9e229bb444a2bff20535100b698998a01fd | 3688f019024f4d6d8864dc7770164d691442b4f3 | refs/heads/master | 2021-01-18T18:27:55.682287 | 2018-04-27T16:38:17 | 2018-04-27T16:38:17 | 80,556,059 | 7 | 2 | null | null | null | null | UTF-8 | Python | false | false | 19,394 | py |
"""Builds parser models."""
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops as cf
from tensorflow.python.ops import state_ops
from tensorflow.python.platform import tf_logging as logging
def bidirectional_LSTM(input, hidden_state_dimension, initializer, sequence_length=None, output_sequence=True):
  """Run a bidirectional coupled-input-forget-gate LSTM over `input`.

  Args:
    input: [batch, time, features] float tensor.
    hidden_state_dimension: hidden-state size of each direction.
    initializer: initializer for the LSTM weights and the learned initial
      states.
    sequence_length: optional [batch] int tensor of true sequence lengths;
      when None, a batch of 1 is assumed and the full time dimension is used.
    output_sequence: if True, return the per-timestep outputs of both
      directions concatenated ([batch, time, 2*hidden]); otherwise return
      the concatenated final hidden states ([batch, 2*hidden]).

  Returns:
    The concatenated forward/backward output tensor described above.
  """
  with tf.variable_scope("bidirectional_LSTM"):
    # BUG FIX: use `is None` -- `sequence_length == None` compares a
    # possibly-tensor value with `==`, which does not test for an omitted
    # argument.
    if sequence_length is None:
      batch_size = 1
      sequence_length = tf.shape(input)[1]
      sequence_length = tf.expand_dims(sequence_length, axis=0, name='sequence_length')
    else:
      batch_size = tf.shape(sequence_length)[0]
    lstm_cell = {}
    initial_state = {}
    for direction in ["forward", "backward"]:
      with tf.variable_scope(direction):
        # LSTM cell with *learned* initial states, tiled across the batch.
        lstm_cell[direction] = tf.contrib.rnn.CoupledInputForgetGateLSTMCell(hidden_state_dimension, forget_bias=1.0, initializer=initializer, state_is_tuple=True)
        initial_cell_state = tf.get_variable("initial_cell_state", shape=[1, hidden_state_dimension], dtype=tf.float32, initializer=initializer)
        initial_output_state = tf.get_variable("initial_output_state", shape=[1, hidden_state_dimension], dtype=tf.float32, initializer=initializer)
        c_states = tf.tile(initial_cell_state, tf.stack([batch_size, 1]))
        h_states = tf.tile(initial_output_state, tf.stack([batch_size, 1]))
        initial_state[direction] = tf.contrib.rnn.LSTMStateTuple(c_states, h_states)
    outputs, final_states = tf.nn.bidirectional_dynamic_rnn(lstm_cell["forward"],
                                                            lstm_cell["backward"],
                                                            input,
                                                            dtype=tf.float32,
                                                            sequence_length=sequence_length,
                                                            initial_state_fw=initial_state["forward"],
                                                            initial_state_bw=initial_state["backward"])
    if output_sequence:
      outputs_forward, outputs_backward = outputs
      output = tf.concat([outputs_forward, outputs_backward], axis=2, name='output_sequence')
    else:
      # final_states are LSTMStateTuples; index [1] selects the h state.
      final_states_forward, final_states_backward = final_states
      output = tf.concat([final_states_forward[1], final_states_backward[1]], axis=1, name='output')
  return output
def BatchedSparseToDense(sparse_indices, output_size):
  """Batch compatible sparse to dense conversion.

  This is useful for one-hot coded target labels.

  Args:
    sparse_indices: [batch_size] tensor containing one index per batch
    output_size: needed in order to generate the correct dense output

  Returns:
    A [batch_size, output_size] dense tensor.
  """
  # An identity matrix used as an embedding table turns each index into a
  # one-hot row vector.
  eye = tf.diag(tf.fill([output_size], tf.constant(1, tf.float32)))
  return tf.nn.embedding_lookup(eye, sparse_indices)
def EmbeddingLookupFeatures(params, sparse_features, allow_weights):
  """Computes embeddings for each entry of sparse features sparse_features.

  Args:
    params: list of 2D tensors containing vector embeddings
    sparse_features: 1D tensor of strings; each entry is a comma-separated
      list of integer feature ids (e.g. "3,17,42").
    allow_weights: unused here -- kept only for interface compatibility
      with callers. Per-id weights are NOT applied by this implementation.

  Returns:
    A [total_num_ids, embedding_size] tensor with one embedding row per
    parsed feature id.

    NOTE(review): unlike the historical SparseFeatures-based variant this
    docstring was inherited from, ids are not summed/weighted into one
    combined embedding per entry -- each id keeps its own row.
  """
  if not isinstance(params, list):
    params = [params]
  # Split every "id,id,..." string into a flat int32 vector of ids, then
  # look each id up in the embedding matrix.
  st = tf.string_split(sparse_features, delimiter=',')
  sparse_features = tf.string_to_number(st.values, out_type=tf.int32)
  embeddings = tf.nn.embedding_lookup(params, sparse_features)
  return embeddings
class GreedyTagger(object):
  """Builds a Chen & Manning style greedy neural net tagger

  Builds a graph with an optional reader op connected at one end and
  operations needed to train the network on the other. Supports multiple
  network instantiations sharing the same parameters and network topology.

  The following named nodes are added to the training and eval networks:
    epochs: a tensor containing the current epoch number
    cost: a tensor containing the current training step cost
    gold_actions: a tensor containing actions from gold decoding
    feature_endpoints: a list of sparse feature vectors
    logits: output of the final layer before computing softmax
  The training network also contains:
    train_op: an op that executes a single training step
  """

  def __init__(self,
               num_actions,
               num_features,
               num_feature_ids,
               embedding_sizes,
               hidden_layer_sizes,
               seed=None,
               gate_gradients=False,
               use_locking=False,
               embedding_init=1.0,
               relu_init=1e-4,
               bias_init=0.2,
               softmax_init=1e-4,
               averaging_decay=0.9999,
               use_averaging=True,
               check_parameters=True,
               check_every=1,
               allow_feature_weights=False,
               only_train='',
               arg_prefix=None,
               **unused_kwargs):
    """Initialize the graph builder with parameters defining the network.

    Args:
      num_actions: int size of the set of parser actions
      num_features: int list of dimensions of the feature vectors
      num_feature_ids: int list of same length as num_features corresponding to
        the sizes of the input feature spaces
      embedding_sizes: int list of same length as num_features of the desired
        embedding layer sizes
      hidden_layer_sizes: int list of desired relu layer sizes; may be empty
      seed: optional random initializer seed to enable reproducibility
      gate_gradients: if True, gradient updates are computed synchronously,
        ensuring consistency and reproducibility
      use_locking: if True, use locking to avoid read-write contention when
        updating Variables
      embedding_init: sets the std dev of normal initializer of embeddings to
        embedding_init / embedding_size ** .5
      relu_init: sets the std dev of normal initializer of relu weights
        to relu_init
      bias_init: sets constant initializer of relu bias to bias_init
      softmax_init: sets the std dev of normal initializer of softmax init
        to softmax_init
      averaging_decay: decay for exponential moving average when computing
        averaged parameters, set to 1 to do vanilla averaging
      use_averaging: whether to use moving averages of parameters during evals
      check_parameters: whether to check for NaN/Inf parameters during
        training
      check_every: checks numerics every check_every steps.
      allow_feature_weights: whether feature weights are allowed.
      only_train: the comma separated set of parameter names to train. If empty,
        all model parameters will be trained.
      arg_prefix: prefix for context parameters.
    """
    self._num_actions = num_actions
    self._num_features = num_features
    self._num_feature_ids = num_feature_ids
    self._embedding_sizes = embedding_sizes
    self._hidden_layer_sizes = hidden_layer_sizes
    self._seed = seed
    self._gate_gradients = gate_gradients
    self._use_locking = use_locking
    self._use_averaging = use_averaging
    self._check_parameters = check_parameters
    self._check_every = check_every
    self._allow_feature_weights = allow_feature_weights
    self._only_train = set(only_train.split(',')) if only_train else None
    self._feature_size = len(embedding_sizes)
    self._embedding_init = embedding_init
    self._relu_init = relu_init
    self._softmax_init = softmax_init
    self._arg_prefix = arg_prefix
    # Parameters of the network with respect to which training is done.
    self.params = {}
    # Other variables, with respect to which no training is done, but which we
    # nonetheless need to save in order to capture the state of the graph.
    self.variables = {}
    # Operations to initialize any nodes that require initialization.
    self.inits = {}
    # Training- and eval-related nodes.
    self.training = {}
    self.evaluation = {}
    self.saver = None
    # Nodes to compute moving averages of parameters, called every train step.
    self._averaging = {}
    self._averaging_decay = averaging_decay
    # Placeholders fed at every step: serialized sparse features, gold label
    # indices, dropout keep-probability, and mention/type information for the
    # entity-type classification head.
    self.input = tf.placeholder(dtype=tf.string)
    self.labels = tf.placeholder(dtype=tf.int32)
    self.dropout = tf.placeholder(tf.float32)
    self.input_type_indices = tf.placeholder(tf.int32, [None], name="input_type_indices")
    self.input_mention_length = tf.placeholder(tf.int32, [None], name="input_mention_length")
    self.input_mention_indices = tf.placeholder(tf.int32, [None, None], name="input_mention_indices")
    # After the following 'with' statement, we'll be able to re-enter the
    # 'params' scope by re-using the self._param_scope member variable. See for
    # instance _AddParam.
    with tf.name_scope('params') as self._param_scope:
      self._relu_bias_init = tf.constant_initializer(bias_init)
    self.training.update(self._BuildNetwork(self.input,
                                            return_average=False))

  @property
  def embedding_size(self):
    # Total width of the concatenated embedding layer.
    size = 0
    for i in range(self._feature_size):
      size += self._num_features[i] * self._embedding_sizes[i]
    return size

  def _AddParam(self,
                shape,
                dtype,
                name,
                initializer=None,
                return_average=False):
    """Add a model parameter w.r.t. we expect to compute gradients.

    _AddParam creates both regular parameters (usually for training) and
    averaged nodes (usually for inference). It returns one or the other based
    on the 'return_average' arg.

    Args:
      shape: int list, tensor shape of the parameter to create
      dtype: tf.DataType, data type of the parameter
      name: string, name of the parameter in the TF graph
      initializer: optional initializer for the paramter
      return_average: if False, return parameter otherwise return moving average

    Returns:
      parameter or averaged parameter
    """
    if name not in self.params:
      step = tf.cast(self.GetStep(), tf.float32)
      # Put all parameters and their initializing ops in their own scope
      # irrespective of the current scope (training or eval).
      with tf.name_scope(self._param_scope):
        self.params[name] = tf.get_variable(name, shape, dtype, initializer)
        param = self.params[name]
        if initializer is not None:
          self.inits[name] = state_ops.init_variable(param, initializer)
        if self._averaging_decay == 1:
          logging.info('Using vanilla averaging of parameters.')
          ema = tf.train.ExponentialMovingAverage(decay=(step / (step + 1.0)),
                                                  num_updates=None)
        else:
          ema = tf.train.ExponentialMovingAverage(decay=self._averaging_decay,
                                                  num_updates=step)
        self._averaging[name + '_avg_update'] = ema.apply([param])
        self.variables[name + '_avg_var'] = ema.average(param)
        self.inits[name + '_avg_init'] = state_ops.init_variable(
            ema.average(param), tf.constant_initializer(0.0))
    return (self.variables[name + '_avg_var'] if return_average else
            self.params[name])

  def GetStep(self):
    # Global step counter, initialized to one.
    def OnesInitializer(shape, dtype=tf.float32, partition_info=None):
      return tf.ones(shape, dtype)
    return self._AddVariable([], tf.int32, 'step', OnesInitializer)

  def _AddVariable(self, shape, dtype, name, initializer=None):
    # Non-trainable graph state (saved, but no gradients computed).
    if name in self.variables:
      return self.variables[name]
    self.variables[name] = tf.get_variable(name, shape, dtype, initializer)
    if initializer is not None:
      self.inits[name] = state_ops.init_variable(self.variables[name],
                                                 initializer)
    return self.variables[name]

  def _ReluWeightInitializer(self):
    with tf.name_scope(self._param_scope):
      return tf.random_normal_initializer(stddev=self._relu_init,
                                          seed=self._seed)

  def _EmbeddingMatrixInitializer(self, index, embedding_size):
    # Std dev shrinks with embedding width so row norms stay comparable.
    return tf.random_normal_initializer(
        stddev=self._embedding_init / embedding_size**.5,
        seed=self._seed)

  def _AddEmbedding(self,
                    features,
                    num_features,
                    num_ids,
                    embedding_size,
                    index,
                    return_average=False):
    """Adds an embedding matrix and passes the `features` vector through it."""
    embedding_matrix = self._AddParam(
        [num_ids, embedding_size],
        tf.float32,
        'embedding_matrix_%d' % index,
        self._EmbeddingMatrixInitializer(index, embedding_size),
        return_average=return_average)
    embedding = EmbeddingLookupFeatures(embedding_matrix,
                                        tf.reshape(features,
                                                   [-1],
                                                   name='feature_%d' % index),
                                        self._allow_feature_weights)
    return tf.reshape(embedding, [-1, num_features * embedding_size])

  def _BuildNetwork(self, feature_endpoints, return_average=False):
    """Builds a feed-forward part of the net given features as input.

    The network topology is already defined in the constructor, so multiple
    calls to BuildForward build multiple networks whose parameters are all
    shared. It is the source of the input features and the use of the output
    that distinguishes each network.

    Args:
      feature_endpoints: tensors with input features to the network
      return_average: whether to use moving averages as model parameters

    Returns:
      logits: output of the final layer before computing softmax
    """
    # Create embedding layer.
    embeddings = []
    for i in range(self._feature_size):
      embeddings.append(self._AddEmbedding(feature_endpoints[i],
                                           self._num_features[i],
                                           self._num_feature_ids[i],
                                           self._embedding_sizes[i],
                                           i,
                                           return_average=return_average))
    last_layer = tf.concat(embeddings, 1)
    last_layer = tf.nn.dropout(last_layer, self.dropout)
    last_layer_size = self.embedding_size

    # Create ReLU layers.
    for i, hidden_layer_size in enumerate(self._hidden_layer_sizes):
      weights = self._AddParam(
          [last_layer_size, hidden_layer_size],
          tf.float32,
          'weights_%d' % i,
          self._ReluWeightInitializer(),
          return_average=return_average)
      bias = self._AddParam([hidden_layer_size],
                            tf.float32,
                            'bias_%d' % i,
                            self._relu_bias_init,
                            return_average=return_average)
      last_layer = tf.nn.relu_layer(last_layer,
                                    weights,
                                    bias,
                                    name='layer_%d' % i)
      last_layer_size = hidden_layer_size

    # Create softmax layer.
    softmax_weight = self._AddParam(
        [last_layer_size, self._num_actions],
        tf.float32,
        'softmax_weight',
        tf.random_normal_initializer(stddev=self._softmax_init,
                                     seed=self._seed),
        return_average=return_average)
    softmax_bias = self._AddParam(
        [self._num_actions],
        tf.float32,
        'softmax_bias',
        tf.constant_initializer(0.0),
        return_average=return_average)
    logits = tf.nn.xw_plus_b(last_layer,
                             softmax_weight,
                             softmax_bias,
                             name='logits')
    predictions = tf.argmax(logits, 1, name="predictions")

    # Create CRF layer. Two extra labels (virtual start/end states) are
    # appended; `small_score` makes them unreachable except at the sequence
    # boundaries.
    small_score = -1000.0
    large_score = 0.0
    sequence_length = tf.shape(logits)[0]
    unary_scores_with_start_and_end = tf.concat([logits, tf.tile( tf.constant(small_score, shape=[1, 2]) , [sequence_length, 1])], 1)
    start_unary_scores = [[small_score] * self._num_actions + [large_score, small_score]]
    end_unary_scores = [[small_score] * self._num_actions + [small_score, large_score]]
    unary_scores = tf.concat([start_unary_scores, unary_scores_with_start_and_end, end_unary_scores], 0)
    start_index = self._num_actions
    end_index = self._num_actions + 1
    input_label_indices_flat_with_start_and_end = tf.concat([ tf.constant(start_index, shape=[1]), self.labels, tf.constant(end_index, shape=[1]) ], 0)
    sequence_lengths = tf.expand_dims(sequence_length, axis=0, name='sequence_lengths')
    unary_scores_expanded = tf.expand_dims(unary_scores, axis=0, name='unary_scores_expanded')
    input_label_indices_flat_batch = tf.expand_dims(input_label_indices_flat_with_start_and_end, axis=0, name='input_label_indices_flat_batch')
    transition_parameters = self._AddParam(
        [self._num_actions+2, self._num_actions+2],
        tf.float32,
        'trainable_params',
        tf.contrib.layers.xavier_initializer(),
        return_average=return_average
    )
    log_likelihood, _ = tf.contrib.crf.crf_log_likelihood(
        unary_scores_expanded, input_label_indices_flat_batch, sequence_lengths, transition_params=transition_parameters)
    boundry_loss = tf.reduce_mean(-log_likelihood, name='cost')

    # Entity-type classification head: embed each mention's token states,
    # summarize with a bidirectional LSTM, then classify into 5 types.
    embedded_mentions = tf.nn.embedding_lookup(last_layer, self.input_mention_indices, name='embedded_mentions')
    mention_lstm_output = bidirectional_LSTM(embedded_mentions, 128, tf.contrib.layers.xavier_initializer(),
                                             sequence_length=self.input_mention_length, output_sequence=False)
    W = tf.get_variable(
        "W",
        shape=[256, 5],
        initializer=tf.contrib.layers.xavier_initializer())
    # BUG FIX: the bias must match W's output dimension (5). The previous
    # shape=[4] made tf.nn.xw_plus_b fail at graph-construction time.
    b = tf.Variable(tf.constant(0.0, shape=[5]), name="bias")
    self.type_scores = tf.nn.xw_plus_b(mention_lstm_output, W, b, name="scores")
    self.type_predictions = tf.argmax(self.type_scores, 1, name="predictions")
    type_loss = tf.reduce_sum(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=self.input_type_indices, logits=self.type_scores))
    loss_sum = tf.add(type_loss, boundry_loss)

    # Add the optimizer. Materialize the dict view as a list so the var_list
    # is a stable, reusable sequence under Python 3.
    trainable_params = list(self.params.values())
    optimizer = tf.train.GradientDescentOptimizer(0.005)
    train_op_sum = optimizer.minimize(loss_sum, var_list=trainable_params)
    # BUG FIX: `train_op_boundry` was returned below but never defined,
    # which raised NameError as soon as the network was built. Train the
    # CRF boundary loss alone with the same optimizer.
    train_op_boundry = optimizer.minimize(boundry_loss, var_list=trainable_params)
    return {'predictions': predictions,
            'unary_scores': unary_scores,
            'cost_sum': loss_sum,
            'cost_boundry': boundry_loss,
            'train_op_boundry': train_op_boundry,
            'train_op_sum': train_op_sum,
            'transition_parameters': transition_parameters}
| [
"expandwings@live.cn"
] | expandwings@live.cn |
a2d611560a46053248bed084f908d83a6834e775 | 4145f057e992332163ea7d5f44999001ef25154f | /examples/src/main/python/KMeansWeather.py | 1eed417c187bb2f1c408c1d27a7c166b9590414c | [
"BSD-3-Clause",
"LicenseRef-scancode-generic-cla",
"LicenseRef-scancode-free-unknown",
"CDDL-1.1",
"MPL-1.1",
"EPL-1.0",
"LGPL-2.0-or-later",
"JSON",
"LGPL-2.1-only",
"LicenseRef-scancode-other-permissive",
"GPL-1.0-or-later",
"CPL-1.0",
"LicenseRef-scancode-unknown-license-reference",
"Ap... | permissive | alexBaiJW/snappydata | a08eca9a8102ae0ae6dff4afead777202e0168d8 | adf4664d824cf4d2b7a65ce4eb4903bfce8c4f78 | refs/heads/master | 2021-04-27T00:19:07.151902 | 2018-02-27T08:55:03 | 2018-02-27T09:25:16 | 123,792,398 | 0 | 0 | Apache-2.0 | 2018-03-04T13:42:55 | 2018-03-04T13:42:55 | null | UTF-8 | Python | false | false | 3,724 | py | #
# Copyright (c) 2017 SnappyData, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you
# may not use this file except in compliance with the License. You
# may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing
# permissions and limitations under the License. See accompanying
# LICENSE file.
#
# This example uses some random ID and random temperature to
# prepare the data. After preparing the model, it applies the original data set to
# predict the temperature
from __future__ import print_function
from pyspark.conf import SparkConf
from pyspark.context import SparkContext
from pyspark.sql.snappy import SnappySession
from pyspark.rdd import *
from pyspark.ml.clustering import KMeans, KMeansModel
import random
import os
# Create SnappyData Tables and insert synthetic data
def createPartitionedTableUsingSQL(snappy, num_rows=100):
    """(Re)create the WEATHER row table and fill it with synthetic data.

    Args:
        snappy: an open SnappySession used to execute the SQL statements.
        num_rows: number of synthetic rows to insert (default 100, matching
            the original behaviour).
    """
    snappy.sql("DROP TABLE IF EXISTS WEATHER")
    snappy.sql("CREATE TABLE WEATHER(" +
               "id integer NOT NULL PRIMARY KEY," +
               "DayOfMonth FLOAT NOT NULL ," +
               "WeatherDegrees FLOAT NOT NULL)" +
               "USING ROW OPTIONS (PARTITION_BY 'DayOfMonth')")
    # BUG FIX: a bare `print` is a no-op in Python 3; `print()` emits the
    # intended blank line.
    print()
    print("Inserting data into WEATHER table")
    for row_id in range(1, num_rows + 1):
        # DayOfMonth in [1, 32] and WeatherDegrees in [1, 120]: random demo data.
        snappy.sql("INSERT INTO WEATHER VALUES (" + str(row_id) + "," +
                   str(random.randint(1, 32)) + "," +
                   str(random.randint(1, 120)) + ")")
    print("printing contents of WEATHER table")
    snappy.sql("SELECT * FROM WEATHER").show(100)
    print("DONE")
#
def applyKMeans(snappy):
    """Cluster the WEATHER rows into 4 groups by temperature.

    Reads (id, WeatherDegrees) from the table, writes the rows out in libsvm
    format (the same degree value duplicated as 3 features), trains a KMeans
    model on that file, and prints the cluster centres plus the per-row
    cluster assignments.
    """
    # Selects and parses the data from the table created earlier
    data = snappy.sql("SELECT id, WeatherDegrees FROM WEATHER")
    parsedData = data.rdd.map(lambda row: (row["ID"], str(row["WEATHERDEGREES"])))
    result = sorted(parsedData.collect(), key=lambda tup: tup[0])
    # Writes the data into the parsedData text file for training
    print("Writing parsed data to weatherdata/parsedData.txt")
    if not os.path.exists("weatherdata"):
        os.makedirs("weatherdata")
    a = open("weatherdata/parsedData.txt", 'w')
    c = 0
    for y in result:
        # libsvm line: "<label> 1:<v> 2:<v> 3:<v>"; the running counter `c`
        # serves as a dummy label that KMeans ignores.
        x = str(c) + " " + "1:" + str(y[1]) + " " + "2:" + str(y[1]) + " " + "3:" + str(y[1])
        print(x)
        a.write(x + "\n")
        c = c + 1
    a.close()
    # Trains the data in order to pass it to the KMeans Clustering Function
    dataset = snappy.read.format("libsvm").load("weatherdata/parsedData.txt")
    print("dataset is " + str(dataset))
    kmeans = KMeans().setK(4).setSeed(2)
    model = kmeans.fit(dataset)
    # Evaluate clustering by computing Within Set Sum of Squared Errors.
    wssse = model.computeCost(dataset)
    print("Within Set Sum of Squared Errors = " + str(wssse))
    # Shows the result, as both the cluster centers, and a table with the cluster assignments in the Predictions column
    centers = model.clusterCenters()
    print("Cluster Centers: ")
    for center in centers:
        print(center)
    transformedDF = model.transform(dataset)
    transformedDF.show(100)
def main(snappy):
    """End-to-end demo: build and populate the table, then cluster it."""
    createPartitionedTableUsingSQL(snappy)
    applyKMeans(snappy)
    print("FINISHED ##########")
if __name__ == "__main__":
    # Configure Spark
    # Entry point: run the whole demo on a local Spark master.
    conf = SparkConf().setAppName('SnappyData KMeans').setMaster("local[*]")
    sc = SparkContext(conf=conf)
    snappy = SnappySession(sc)
    main(snappy)
| [
"noreply@github.com"
] | alexBaiJW.noreply@github.com |
410d214d94d11b70963d1dcf14857ec6ff158b17 | 854b8ea9a080d844ffbcdd694e7d2d03bf301aa0 | /tinymcetest/app/admin.py | f37e1d8131ab431d3097129621eeb02245d391a6 | [] | no_license | hagsteel/tinymceissue | 9b8aa6aa2c9cdf20702f13f47634f3bc704dd993 | e7e59607e3c58e4324fe2d9766d881b5bc21c2b6 | refs/heads/master | 2021-05-27T12:47:38.898826 | 2014-04-09T17:21:36 | 2014-04-09T17:21:36 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 245 | py | from django.contrib import admin
from .models import Foo, Bar
class BarInlines(admin.StackedInline):
    # Edit related Bar rows inline (stacked layout) on the parent admin page.
    model = Bar
class FooAdmin(admin.ModelAdmin):
    # Show Bar objects inline when editing a Foo in the admin.
    inlines = [BarInlines, ]
admin.site.register(Foo, FooAdmin)
admin.site.register(Bar) | [
"hagstedt@gmail.com"
] | hagstedt@gmail.com |
c2303d7995bb929e1f67d095adaebe5eeb4f51ef | 9cf7b376db72e46346d468bbc537ec92612b557d | /flask-connexion-alchemy-tutorial/people.py | 61e4e994ba7700119b78a590a0902022aa7166c4 | [] | no_license | thiagowig/python | c1c961aef36044f0770f5b664ce9e786c3919adf | d1481c51f6f4d4a082893de677631524cfc6c49a | refs/heads/master | 2023-08-04T05:04:03.582686 | 2023-07-24T22:06:11 | 2023-07-24T22:06:11 | 92,443,539 | 0 | 0 | null | 2023-01-07T07:50:48 | 2017-05-25T20:53:03 | Python | UTF-8 | Python | false | false | 1,968 | py | from flask import make_response, abort
from config import db
from models import Person, PersonSchema, Note
def read_all():
    """Return every person, serialised, ordered by last name."""
    everyone = Person.query.order_by(Person.lname).all()
    return PersonSchema(many=True).dump(everyone).data
def read_one(person_id):
    """Serialise one person (with any notes joined); 404 if the id is unknown."""
    found = (
        Person.query.filter(Person.person_id == person_id)
        .outerjoin(Note)
        .one_or_none()
    )
    if found is None:
        abort(404, "Person not found")
    return PersonSchema().dump(found).data
def create(person):
    """Insert a new person; 409 if the same first/last name pair exists."""
    fname = person.get("fname")
    lname = person.get("lname")
    duplicate = (
        Person.query.filter(Person.fname == fname)
        .filter(Person.lname == lname)
        .one_or_none()
    )
    if duplicate is not None:
        abort(409, "Person exists already")
    schema = PersonSchema()
    new_person = schema.load(person, session=db.session).data
    db.session.add(new_person)
    db.session.commit()
    return schema.dump(new_person).data, 201
def update(person_id, person):
    """Apply *person*'s fields to an existing record; 404 if absent."""
    existing = Person.query.filter(
        Person.person_id == person_id
    ).one_or_none()
    if existing is None:
        abort(404, "Person not found")
    schema = PersonSchema()
    # Deserialise the payload, pin it to the stored primary key, and merge.
    changed = schema.load(person, session=db.session).data
    changed.person_id = existing.person_id
    db.session.merge(changed)
    db.session.commit()
    return schema.dump(existing).data, 200
def delete(person_id):
person = Person.query.filter(Person.person_id == person_id).one_or_none()
if person is not None:
db.session.delete(person)
db.session.commit()
return make_response("Person deleted", 200)
else:
abort(404, "Person not found") | [
"dev.thiago@gmail.com"
] | dev.thiago@gmail.com |
44d74ba7a51c8a4e89e75c6f7299e7f62805d62d | 042a18297499faf5b4e5e3f3d09871203c06cccc | /pcentra_project/urls.py | cee845d93507a88cf84e6f4083d8bccd981f3684 | [] | no_license | GSchpektor/url-shortener | a3499fdc17bb67a190bd4b6817412323beeb6e57 | bfcd09960386ac2be3983a8483c50f9b85c54be3 | refs/heads/master | 2023-06-05T18:39:02.926799 | 2021-06-28T17:26:50 | 2021-06-28T17:26:50 | 381,111,131 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 801 | py | """pcentra_project URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.1/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import path, include
urlpatterns = [
path('admin/', admin.site.urls),
path('', include('app1.urls'))
]
| [
"gschpektor@gmail.com"
] | gschpektor@gmail.com |
9fd24741ecd9a3648f3996976db6dd8c50b4812b | 273174e67bf271a20a221c15f663c418c5f6ee55 | /useful/split-dataset.py | d0efceb9cf9aa5461ce7ea963b02919e09997a30 | [] | no_license | Fang789/F-test | e1ded46c327f1e4c8cde34939cfa188bd42e0cce | eacbf5a2ac8bb097da7e731aefc7b4953b18ca30 | refs/heads/master | 2020-04-24T09:15:00.789969 | 2019-06-25T03:29:48 | 2019-06-25T03:29:48 | 171,857,098 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,628 | py | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import argparse
import glob
import os
parser = argparse.ArgumentParser()
# 添加默认路径则可以直接执行该程序
parser.add_argument("--dir", type=str, required=True, help="path to folder containing images")
parser.add_argument("--train_frac", type=float, default=0.8, help="percentage of images to use for training set")
parser.add_argument("--test_frac", type=float, default=0.1, help="percentage of images to use for test set")
parser.add_argument("--val_frac", type=float, default=0.1, help="percentage of images to use for val set")
parser.add_argument("--sort", action="store_true", help="if set, sort the images instead of shuffling them")
a = parser.parse_args()
def main():
random.seed(0)
files = glob.glob(os.path.join(a.dir, "*.png"))
files.sort()
assignments = []
assignments.extend(["train"] * int(a.train_frac * len(files)))
assignments.extend(["test"] * int(a.test_frac * len(files)))
assignments.extend(["val"] * int(len(files) - len(assignments)))
if not a.sort:
random.shuffle(assignments)
for name in ["train", "val", "test"]:
if name in assignments:
d = os.path.join(a.dir, name)
if not os.path.exists(d):
os.makedirs(d)
print(len(files), len(assignments))
for inpath, assignment in zip(files, assignments):
outpath = os.path.join(a.dir, assignment, os.path.basename(inpath))
print(inpath, "->", outpath)
os.rename(inpath, outpath)
main()
| [
"18222604921@163.com"
] | 18222604921@163.com |
07ccca1ad2d1ac1eabc7ee6a124434a18a9abf44 | 5e5799e0ccce7a72d514fbc76dcb0a2108013f18 | /DAQConst.py | 97bc899ca96bfab6e6bceb5513c84de6b84fe56f | [] | no_license | sourcery-ai-bot/dash | 6d68937d225473d06a18ef64079a4b3717b5c12c | e1d1c3a601cd397d2508bfd4bb12bdb4e878cd9a | refs/heads/master | 2023-03-07T17:15:39.174964 | 2011-03-01T17:11:21 | 2011-03-01T17:11:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 493 | py | #!/usr/bin/env python
#
# DAQ Constant values
class DAQPort(object):
"DAQLive port"
DAQLIVE = 6659
"IceCube Live logging/monitoring port"
I3LIVE = 6666
"CnCServer XML-RPC port"
CNCSERVER = 8080
"CnCServer->DAQRun logging port"
CNC2RUNLOG = 8999
"DAQRun XML-RPC port"
DAQRUN = 9000
"DAQRun catchall logging port"
CATCHALL = 9001
"First port used by DAQRun for individual component logging"
RUNCOMP_BASE = 9002
| [
"dglo@icecube.wisc.edu"
] | dglo@icecube.wisc.edu |
c69163075924211a4fc14d23cac38ec78f832478 | f91c1fc1e9479b71bffd415b35075ed49774c565 | /tf-complex-model.py | 4e7d5a9d93f45079aebf5778b29b6d61e8aebb30 | [] | no_license | markschmidt42/nn-test | 6f54dd576dc8e38bdc42fbdc46bc9ab951ee8cdf | b2a4292fd91a2470e2bf1473ed309b8a538cab6d | refs/heads/master | 2023-04-13T02:08:40.235298 | 2019-10-29T10:32:13 | 2019-10-29T10:32:13 | 217,711,294 | 0 | 0 | null | 2023-03-25T00:17:24 | 2019-10-26T13:17:22 | Python | UTF-8 | Python | false | false | 3,003 | py | from __future__ import absolute_import, division, print_function, unicode_literals
import sys
import os.path
import pathlib
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import utils.pymark as pymark
# https://www.tensorflow.org/api_docs/python/tf/estimator/DNNRegressor
print(tf.__version__)
DATA_TYPE = 'complex'
data_type = sys.argv[1] if len(sys.argv) == 2 else DATA_TYPE
TRAIN_TEST_DATA_CSV = f'data/{data_type}_train_test.csv'
PREDICT_DATA_CSV = f'data/{data_type}_predict.csv'
EPOCHS = 5000
VALIDATION_SPLIT_PERCENT = 0.2
if not os.path.exists(TRAIN_TEST_DATA_CSV):
print(f'ERROR: File does not exist: {TRAIN_TEST_DATA_CSV}')
print(f'Please run the following to genrate data:\n\tpython generate-data.py --type {data_type}')
sys.exit()
train_dataset, train_labels, test_dataset, test_labels, output_column_name = pymark.get_data(TRAIN_TEST_DATA_CSV, normalize=True)
print(train_dataset.tail())
input_size = len(train_dataset.keys())
def build_model():
model = keras.Sequential([
layers.Dense(100, activation='tanh', kernel_initializer='random_normal', input_shape=[input_size]),
# layers.Dropout(0.2),
layers.Dense(100, activation='tanh'),
layers.Dense(100, activation='tanh'),
# layers.Dense(100, activation='tanh'),
# layers.Dropout(0.2),
layers.Dense(1, activation='linear')
])
# optimizer = tf.keras.optimizers.RMSprop(0.001)
# optimizer = tf.keras.optimizers.Adam(0.0001)
optimizer = tf.keras.optimizers.SGD(lr=0.001);
model.compile(loss='mse',
optimizer=optimizer,
metrics=['mae', 'mse'])
return model
#end def ------------------------------------------------------------------------------------------
model = build_model()
model.summary()
# Display training progress by printing a single dot for each completed epoch
class PrintDot(keras.callbacks.Callback):
def on_epoch_end(self, epoch, logs):
if epoch % 50 == 0: print('')
print(f'{epoch},', end='')
# The patience parameter is the amount of epochs to check for improvement
early_stop = keras.callbacks.EarlyStopping(monitor='val_loss', patience=20)
# let's test the model BEFORE we train
#pymark.test_model(model, 'test_dataset', test_dataset, test_labels, output_column_name)
history = model.fit(
train_dataset,
train_labels,
epochs=EPOCHS,
validation_split=VALIDATION_SPLIT_PERCENT,
verbose=0,
callbacks=[early_stop, PrintDot()]
)
pymark.plot_history(history, output_column_name)
# let's test the model with our test data (from the training set)
pymark.test_model(model, 'test_dataset', test_dataset, test_labels, output_column_name)
# let's try it on some brand new data it has never seen
predict_dataset, predict_labels, output_column_name = pymark.get_data(PREDICT_DATA_CSV, normalize=True, split_percent=0)
pymark.test_model(model, 'predict_dataset', predict_dataset, predict_labels, output_column_name) | [
"mschmidt@bizstream.com"
] | mschmidt@bizstream.com |
bdf86107848bfb2b46e20c1e5b675e7ce5077039 | 183d00848dbdf354510c1021e35816b9b60ad562 | /settings.py | a348582f1f32902345f3644e8a582018f69fcf14 | [] | no_license | cs4246/aivle-similarity | 27ef8e3870287e7f0951050c215ea69100e2069c | baa573b988f9f435febf0d120dc07f4354a6433b | refs/heads/master | 2021-07-16T04:58:32.568238 | 2020-11-18T07:23:23 | 2020-11-18T07:23:23 | 224,840,104 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 285 | py | import os
from dotenv import load_dotenv
load_dotenv()
TASK_API = os.getenv("TASK_API")
SIMILARITY_API = os.getenv("SIMILARITY_API")
USERNAME = os.getenv("USERNAME")
PASSWORD = os.getenv("PASSWORD")
AGENTS_PATH = os.getenv("AGENTS_PATH")
TEMPLATES_PATH = os.getenv("TEMPLATES_PATH")
| [
"rizki.idea@gmail.com"
] | rizki.idea@gmail.com |
a2d45d6ff87f09e29389e37874d514eaee9d3a6b | 3ede36028c5cdce88fb010ffd0d8c6e1bb551628 | /Section_4/imports/relative/some_script3.py | 2d5ac47a3b0e9a8fc740a646f264a16c5f3b656b | [
"MIT"
] | permissive | PacktPublishing/Software-Engineering-with-Python-3.x | 6baa85ec673fc2e826e195969929523811b0f439 | 056e4c89e4f8d7fc4a4095ee0671d6944a86630e | refs/heads/master | 2022-07-15T04:43:02.702704 | 2021-01-21T08:54:54 | 2021-01-21T08:54:54 | 237,384,674 | 2 | 2 | MIT | 2022-06-22T01:23:19 | 2020-01-31T07:56:26 | Jupyter Notebook | UTF-8 | Python | false | false | 90 | py | from .relative_script1 import dummy_func
print("Running from some_script3")
dummy_func()
| [
"git.tut123@gmail.com"
] | git.tut123@gmail.com |
a289d33e5f4de9f5ab9f3406de9681a4a963b382 | 29fa882745c0341f17d6b55f19652d15b4ada6f3 | /ex3.py | 36857aeac3af1d870e38025bdf0e8735e548cab8 | [] | no_license | jackieShao/Python | 59ee087ed96439dc0abf434b5406b1e58dc8454a | 79d5617d4ad16efab3bc5fc583fac8333fc860e1 | refs/heads/master | 2020-04-11T20:45:49.146547 | 2018-12-17T18:50:10 | 2018-12-17T18:50:10 | 162,081,753 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 210 | py | print(float(123))
print(float('123'))
print(float('123.23'))
print(int(123.23))
print(int('123.23'))
print(int(float('123.23')))
print(str(12))
print(str(12.2))
print(bool('a'))
print(bool(0))
print(bool(0.1))
| [
"noreply@github.com"
] | jackieShao.noreply@github.com |
0a4c749eb677cb351a555ea0989a6e10260cf8ce | 06bd4fe15302576747ad06e4165646215a261ed0 | /Fibonacci.py | 6047a53ed82a707c4c6d55a925e6eb1117fb6e50 | [] | no_license | rpural/python | fdf2ca3e870b55013b0d11005882431ed8925553 | 103ead442dc23564748d3ce132dbe626a88770ae | refs/heads/master | 2022-11-30T16:59:04.281236 | 2022-11-28T00:43:38 | 2022-11-28T00:43:38 | 240,421,397 | 0 | 1 | null | 2021-02-26T15:49:56 | 2020-02-14T03:37:11 | null | UTF-8 | Python | false | false | 416 | py | import functools
@functools.lru_cache(maxsize=1024)
def fibonacci(n):
if n <= 2:
return 1
else:
return fibonacci(n-1) + fibonacci(n-2)
# you can also self-memoize the function
def fibonacciM(n, memo={}):
if n in memo:
return memo[n]
if n <= 2:
return 1
memo[n] = fibonacciM(n-1) + fibonacciM(n-2)
return memo[n]
for i in range(1, 31):
print(f"term {i}: {fibonacci(i)} or {fibonacciM(i)}")
| [
"noreply@github.com"
] | rpural.noreply@github.com |
de21d1df4928b81a797c1f8046c31720d97d7526 | c66ff8c9e5827eac5b49559055f5c4c911e4c455 | /Leet_1498.py | 14784e1401cf548fe99153f8ebcd2d7fc593ccc8 | [] | no_license | easyoffbang/FORIF | 24f7702d6381a48a6c8116c5616638abeb488664 | 86dd223c87c508cce526b1c6fe0eb690b53f1e0b | refs/heads/master | 2023-01-08T18:57:54.291754 | 2020-11-13T09:38:20 | 2020-11-13T09:38:20 | 302,309,189 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 585 | py | class Solution:
def numSubseq(self, nums: List[int], target: int) -> int:
nums.sort()
print(nums)
left = 0
right = len(nums) - 1
res = 0
while (left < right):
min_v = nums[left]
max_v = nums[right]
sum = min_v + max_v
if sum <= target:
count = right - left
res = res + 2 ** count
left += 1
else:
right -= 1
if nums[left] * 2 <= target:
res += 1
return res % (10 ** 9 + 7)
| [
"46309807+easyoffbang@users.noreply.github.com"
] | 46309807+easyoffbang@users.noreply.github.com |
d7c2f544a7f24402d16405583aa845ea30955539 | 771338ff598a3a354864448e2ba539b46337f99e | /references/face-recognition-incomplete/src/images/faces.py | 7706749c3ba3bf19003729d6d70ce6d162708399 | [] | no_license | DanBrown47/ThirdEye | 51b6cc26508bf4a14d195fe817e9124256b823b0 | 5e608b40aea87e050fd807ecf18ef0a2f94add96 | refs/heads/master | 2023-01-06T21:17:11.430645 | 2020-08-14T13:06:57 | 2020-08-14T13:06:57 | 287,532,918 | 5 | 0 | null | 2020-11-02T03:30:08 | 2020-08-14T12:55:24 | Python | UTF-8 | Python | false | false | 611 | py | import numpy as np
import cv2
face_cascade = cv2.CascadeClassifier('../cascades/data/haarcascade_frontalface_alt2.xml')
cap = cv2.VideoCapture(0)
while(True):
#capture frame by frame
ret, frame = cap.read()
gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(gray, scaleFactor=1.5, minNeighbors = 5)
for (x,y,w,h) in faces:
print(x,y,w,h)
#display the resulting frame
cv2.imshow('frame', frame)
if cv2.waitKey(20) & 0xFF == ord('q'):
break
#when everything is done release the capture
cap.release()
cv2.destroyAllWindows() | [
"danwnad47@gmail.com"
] | danwnad47@gmail.com |
72f545c6a4661c3d3be071de5cfd41ea9c315f19 | 50f4716b7d8f28eb47e9ad88180ae67e1b10c94f | /assignment1.py | 868781c1c42233e52d0acf54925e3a9011a611ca | [] | no_license | Nehap-710/MyCaptainProjects | 33b40b33f89edeb71caf1ce4a0e61ed9379a03d1 | 63d1c6030293aa11865e490450cc6b40637db6fd | refs/heads/master | 2022-11-05T04:33:53.569205 | 2020-06-19T06:49:44 | 2020-06-19T06:49:44 | 270,611,935 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 134 | py | r=eval(input("Input the radius of the cirle: "))
a=22/7
b=r**2
c=b*a
print("The area of circle of radius "+str(r)+" is "+str(c))
| [
"noreply@github.com"
] | Nehap-710.noreply@github.com |
4bc61112ae24b9d7e81bceb35f1ae037eeb26ade | eee0a65bd68a281e37e064347003e20f1ecf7845 | /Lab 2/go_to_cube/go_to_cube.py | db51c3b31222693e9e00082e4d6545ccd9c2f331 | [] | no_license | Jdbb12899/Robotics-Computer-Vision | 34bc85b08410adbdea4e828925ea69a4d195baef | c2415f880dd7bd9dee799748555fff849fd4ef71 | refs/heads/main | 2023-07-10T14:15:15.138019 | 2021-08-23T02:09:54 | 2021-08-23T02:09:54 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,894 | py | #!/usr/bin/env python3
#!c:/Python35/python3.exe -u
import asyncio
import sys
import cv2
import numpy as np
import cozmo
import time
import os
from glob import glob
from find_cube import *
try:
from PIL import ImageDraw, ImageFont
except ImportError:
sys.exit('run `pip3 install --user Pillow numpy` to run this example')
def nothing(x):
pass
YELLOW_LOWER = np.array([9, 115, 151])
YELLOW_UPPER = np.array([179, 215, 255])
GREEN_LOWER = np.array([0,0,0])
GREEN_UPPER = np.array([179, 255, 60])
# Define a decorator as a subclass of Annotator; displays the keypoint
class BoxAnnotator(cozmo.annotate.Annotator):
cube = None
def apply(self, image, scale):
d = ImageDraw.Draw(image)
bounds = (0, 0, image.width, image.height)
if BoxAnnotator.cube is not None:
#double size of bounding box to match size of rendered image
BoxAnnotator.cube = np.multiply(BoxAnnotator.cube,2)
#define and display bounding box with params:
#msg.img_topLeft_x, msg.img_topLeft_y, msg.img_width, msg.img_height
box = cozmo.util.ImageBox(BoxAnnotator.cube[0]-BoxAnnotator.cube[2]/2,
BoxAnnotator.cube[1]-BoxAnnotator.cube[2]/2,
BoxAnnotator.cube[2], BoxAnnotator.cube[2])
cozmo.annotate.add_img_box_to_image(image, box, "green", text=None)
BoxAnnotator.cube = None
async def run(robot: cozmo.robot.Robot):
robot.world.image_annotator.annotation_enabled = False
robot.world.image_annotator.add_annotator('box', BoxAnnotator)
robot.camera.image_stream_enabled = True
robot.camera.color_image_enabled = True
robot.camera.enable_auto_exposure = True
gain,exposure,mode = 390,3,1
try:
while True:
event = await robot.world.wait_for(cozmo.camera.EvtNewRawCameraImage, timeout=30) #get camera image
if event.image is not None:
image = cv2.cvtColor(np.asarray(event.image), cv2.COLOR_BGR2RGB)
if mode == 1:
robot.camera.enable_auto_exposure = True
else:
robot.camera.set_manual_exposure(exposure,fixed_gain)
#find the cube
cube = find_cube(image, YELLOW_LOWER, YELLOW_UPPER)
print(cube)
BoxAnnotator.cube = cube
################################################################
# Todo: Add Motion Here
################################################################
except KeyboardInterrupt:
print("")
print("Exit requested by user")
except cozmo.RobotBusy as e:
print(e)
#cv2.destroyAllWindows()
if __name__ == '__main__':
cozmo.run_program(run, use_viewer = True, force_viewer_on_top = True)
| [
"noreply@github.com"
] | Jdbb12899.noreply@github.com |
9231a56c3e39822d15325531e790fc893122c33e | 013979332f630b8fef51b7f09972ca40bf9877fb | /1011.py | 6c10fc99646a9034095a82fa323684c51b01eeee | [] | no_license | Shayaike/URI-Online-Judge | f128927976c776df07a9569da4a4ec8ae01d8728 | c45eb2c3e5213d3c255dc02286377e0993772f2a | refs/heads/master | 2020-06-22T05:40:29.814939 | 2019-09-10T06:31:45 | 2019-09-10T06:31:45 | 197,647,883 | 2 | 0 | null | 2019-09-10T06:31:48 | 2019-07-18T19:49:28 | Python | UTF-8 | Python | false | false | 105 | py | radius = R = float(input())
pi = 3.14159
VOLUME = (4/3)*pi*R**3
print("VOLUME = {0:.3f}".format(VOLUME))
| [
"noreply@github.com"
] | Shayaike.noreply@github.com |
a7f551d56f07dfef4a8952ac09f766fb8484bca2 | 71e823659d516f25a8e60c925d14da1cd46a5853 | /GamerZone/GamerZone/asgi.py | f9d6af86fdfe7cae04210cd831b160de3d77ab5d | [] | no_license | deepkhurana1999/Hands-On-with-Django-Rest-Framework | 2acf1fe8621ffa878933448e34c9f1717f6bd2cc | 3cd144afb034a6b680a68883d6e27a74ca6ad8d6 | refs/heads/master | 2022-12-29T12:15:38.917197 | 2020-06-22T11:34:30 | 2020-06-22T11:34:30 | 273,720,230 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 395 | py | """
ASGI config for GamerZone project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.0/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'GamerZone.settings')
application = get_asgi_application()
| [
"marverev@gmail.com"
] | marverev@gmail.com |
59aaf910c020b33c4d9bb57563bb0d650b19d3ed | 844a78eaad43325de23f8ec52c4a853e4bb2e447 | /entrega/v5/canales_201729241.py | b89f6700be3a017484d9ed93e38f5585b9f1fa47 | [] | no_license | sergiocanalesm1/scientific_programming | 869b77b288d975c5a415cf0e2a20ac5d13d242c2 | 3c3539befed0a4bd86f6a0c1152e5045065f6231 | refs/heads/master | 2023-01-12T11:45:16.340684 | 2020-11-18T20:52:49 | 2020-11-18T20:52:49 | 236,985,459 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,226 | py | import numpy as np
import sympy as sym
import scipy.optimize as opt
import matplotlib.pyplot as plt
def derivadas_parciales(fi,fii):
return sym.lambdify([x1,x2],fi.diff(x1),"numpy"),sym.lambdify([x1,x2],fi.diff(x2),"numpy"),sym.lambdify([x1,x2],fii.diff(x1),"numpy"),sym.lambdify([x1,x2],fii.diff(x2),"numpy")
def evaluar(fi,fii):
return sym.lambdify([x1,x2],fi,"numpy"),sym.lambdify([x1,x2],fii,"numpy")
def jaco(a,b,fi,fii):
dfix1, dfix2, dfiix1, dfiix2 = derivadas_parciales(fi, fii)
jac = np.zeros([2,2])
jac[0,0] = dfix1(a,b)
jac[0,1] = dfix2(a,b)
jac[1,0] = dfiix1(a,b)
jac[1,1] = dfiix2(a,b)
return jac
def multivariable_solve(fi,fii,tolx=10**-5,tolf=10**-5,x1i0 = 5.0,x2i0 = 2.0):
fi_e,fii_e = evaluar(fi,fii)#variables para poder evaluarlas expresiones simbolicas
iter = 0
while True:
iter += 1
A = jaco(x1i0,x2i0,fi,fii)
b = np.zeros([2,1])
b[0] = -fi_e(x1i0, x2i0)
b[1] = -fii_e(x1i0, x2i0)
delta_x = np.linalg.solve(A,b)
x_1 = np.float(x1i0 + delta_x[0])
x_2 = np.float(x2i0 + delta_x[1])
if np.abs(x_1-x1i0) <= tolx and np.abs(x_2-x2i0) <= tolx:
break
if np.abs(fi_e(x_1,x_2)) <= tolf and np.abs(fii_e(x_1,x_2)) <= tolf:
break
x1i0 = x_1
x2i0 = x_2
print("con {} iteraciones y {} de tolerancia,\nraiz x1: {} raiz x2: {}\n".format(iter,tolx,x_1,x_2))
x1 = sym.Symbol("x1")
x2 = sym.Symbol("x2")
f1 = 3.0*sym.exp(-(x1**2))-5.0*(x2)**(1.0/3.0)+6.0
f2 = 3.0*x1 + 0.5*(x2)**(1.0/4.0)-15.0
f3 = x1**2+x2-3
f4 = (x1-2)**2+(x2+3)**2-4
def lineas_contorno(fi,fii):
delta = 0.1
x_1 = np.arange(-2.0, 4.0, delta)
x_2 = np.arange(-2.0, 4.0, delta)
X1,X2 = np.meshgrid(x_1,x_2)
fi_e, fii_e = evaluar(fi, fii)
plt.figure()
c1 = plt.contour(X1,X2,fi_e(X1,X2),colors="b")
c2 = plt.contour(X1, X2, fii_e(X1, X2), colors="r")
plt.clabel(c1)
plt.clabel(c2)
plt.grid(1)
plt.show()
#multivariable_solve(f1,f2)
#lineas_contorno(f1,f2)
lineas_contorno(f3,f4)
multivariable_solve(f3,f4,x1i0=2,x2i0=-1)#el intervalo se definio con la grafica anterior
multivariable_solve(f3,f4,tolx=10**-10,tolf=10**-10)
| [
"sergiocanalesm1@gmail.com"
] | sergiocanalesm1@gmail.com |
993bf38bc266355361aea04fa270bedcbfd4a397 | a53069f027d47d7ca408a112f954ba00d08c57dd | /algo_1/tp3/clase_tortuga.py | 6a057c1aaaa5423808421b3a01f97ca346b06a53 | [] | no_license | joacobetz/Facultad | abb17724b88b2f34bbb7a626677d188de04202d2 | 072bae534ab246c32fca34f1cf9e79bbb5d8bfea | refs/heads/master | 2023-01-02T02:07:11.836806 | 2020-10-30T21:45:58 | 2020-10-30T21:45:58 | 308,749,593 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,281 | py | import math
class _Pluma:
'''representa una pluma con atributos de tinta, ancho de trazo
y si esta levantada o no'''
def __init__(self):
'''crea una pluma con ancho igual 1, color negro y en posicion
de escritura'''
self.ancho = 1
self.color = 'black'
self.escribe = True
class Tortuga:
'''representa una tortuga con atributos de posicion, orientacion y pluma'''
def __init__(self):
'''crea una tortuga con posicion en (0,0), orientacion de 0
radianes y pluma con los atrubutos que vengan por defecto'''
self.posicion = (0,0)
self.orientacion = 0
self.pluma = _Pluma()
def derecha(self, angulo):
'''recibe un angulo y cambia la orientacion de la tortuga
el ángulo dado hacia la derecha'''
self.orientacion -= math.radians(angulo)
def izquierda(self, angulo):
'''recibe un angulo y cambia la orientacion de la tortuga
el ángulo dado hacia la izquierda'''
self.orientacion += math.radians(angulo)
def pluma_arriba(self):
'''cambia el atributo pluma de la tortuga para que no escriba'''
self.pluma.escribe = False
def pluma_abajo(self):
'''cambia el atributo pluma de la tortuga para que escriba'''
self.pluma.escribe = True
def adelantar(self,n=1):
'''recibe la cantidad (1 por defecto) de espacio a recorrer y avanza
dicha cantidad en la direccion en que esté orientada la Tortuga'''
x, y = self.posicion
x += n*math.cos(self.orientacion)
y += n*math.sin(self.orientacion)
self.posicion = (x,y)
def angulo(self):
'''devuelve la orientacion en grados de la tortuga'''
return math.degrees(self.orientacion)
def ver_posicion(self):
'''devuelve la posicion de la tortuga'''
return self.posicion
def pluma_esta_abajo(self):
'''devuelve si la pluma escribe o no'''
return self.pluma.escribe
def color(self):
'''devuelve el color de la pluma'''
return self.pluma.color
def ancho(self):
'''devuelve el ancho de la pluma'''
return self.pluma.ancho | [
"jbetz@fi.uba.ar"
] | jbetz@fi.uba.ar |
3e2a5e153539c0203cf07009c6a9c8f741abb8d8 | 130a480386b2673839ba73fdfdb928778eed269a | /vision-detector_qh/test/test_wwl/de_baseline_overlap_0.3.py | 1b47bdc46457d568afe353343bcc2d9286311e63 | [] | no_license | qinhuan/scrip_in_didi | 335c1ca81abe5a2f4a48433ec01f7c9c7e925e7a | 8f0bf460c49760c289eca8072df721a5420e91f2 | refs/heads/master | 2020-12-30T12:45:16.562282 | 2017-05-26T08:08:35 | 2017-05-26T08:08:35 | 91,359,142 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 10,393 | py | import numpy as np
import matplotlib.pyplot as plt
#%matplotlib inline
import array
# Make sure that caffe is on the python path:
caffe_root = '/home/work/qinhuan/git/caffe-ssd'
import os
os.chdir(caffe_root)
import sys
sys.path.insert(0, 'python')
import caffe
from google.protobuf import text_format
from caffe.proto import caffe_pb2
def get_labelname(labelmap, labels):
num_labels = len(labelmap.item)
labelnames = []
if type(labels) is not list:
labels = [labels]
for label in labels:
found = False
for i in xrange(0, num_labels):
if label == labelmap.item[i].label:
found = True
labelnames.append(labelmap.item[i].display_name)
break
assert found == True
return labelnames
def non_max_suppression_fast(x1, y1, x2, y2, conf, overlapThresh):
# if there are no boxes, return an empty list
if len(x1) == 0:
return []
# initialize the list of picked indexes
pick = []
# compute the area of the bounding boxes and sort the bounding
# boxes by the bottom-right y-coordinate of the bounding box
area = (x2 - x1 + 1) * (y2 - y1 + 1)
idxs = np.argsort(conf)
# keep looping while some indexes still remain in the indexes
# list
while len(idxs) > 0:
# grab the last index in the indexes list and add the
# index value to the list of picked indexes
last = len(idxs) - 1
i = idxs[last]
pick.append(i)
# find the largest (x, y) coordinates for the start of
# the bounding box and the smallest (x, y) coordinates
# for the end of the bounding box
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
# compute the width and height of the bounding box
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
# compute the ratio of overlap
overlap = (w * h) / (area[idxs[:last]] + area[i] - (w * h))
# delete all indexes from the index list that have
idxs = np.delete(idxs, np.where(overlap > overlapThresh)[0])
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap1 = (w * h) / (area[idxs[:last]])
idxs = np.delete(idxs, np.where(overlap1 > 0.8)[0])
xx1 = np.maximum(x1[i], x1[idxs[:last]])
yy1 = np.maximum(y1[i], y1[idxs[:last]])
xx2 = np.minimum(x2[i], x2[idxs[:last]])
yy2 = np.minimum(y2[i], y2[idxs[:last]])
w = np.maximum(0, xx2 - xx1 + 1)
h = np.maximum(0, yy2 - yy1 + 1)
overlap2 = (w * h) / (area[i])
idxs = np.delete(idxs, np.concatenate(([last],
np.where(overlap2 > 0.8)[0])))
# return only the bounding boxes that were picked using the
# integer data type
return pick
if __name__ == '__main__':
caffe.set_device(2)
caffe.set_mode_gpu()
# load PASCAL VOC labels
labelmap_file = '/home/work/qinhuan/git/vision-detector/test/test_wwl/toqinhuan/labelmap.prototxt'
file = open(labelmap_file, 'r')
labelmap = caffe_pb2.LabelMap()
text_format.Merge(str(file.read()), labelmap)
model_def = '/home/work/qinhuan/git/vision-detector/test/test_wwl/toqinhuan/baseline/deploy.prototxt'
model_weights = '/home/work/qinhuan/git/vision-detector/test/test_wwl/toqinhuan/baseline/final.caffemodel'
net = caffe.Net(model_def, # defines the structure of the model
model_weights, # contains the trained weights
caffe.TEST) # use test mode (e.g., don't perform dropout)
# input preprocessing: 'data' is the name of the input blob == net.inputs[0]
transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
transformer.set_transpose('data', (2, 0, 1))
transformer.set_mean('data', np.array([114,115,108])) # mean pixel
transformer.set_raw_scale('data', 255) # the reference model operates on images in [0,255] range instead of [0,1]
transformer.set_channel_swap('data', (2,1,0)) # the reference model has channels in BGR order instead of RGB
net.blobs['data'].reshape(1,3,128,512)
fout = open('/home/work/qinhuan/git/vision-detector/test/test_wwl/toqinhuan/txts/res_baseline_4_nmsv2_0.3_andoverlap12_0.8.txt', 'w')
img_dir = '/home/work/tester/data/KITTI'
test_list = '/home/work/qinhuan/git/vision-detector/test/test_wwl/toqinhuan/list.txt'
with open(test_list) as f:
while True:
line = f.readline()
if line == '':
break
line = line.strip()
print line
img = caffe.io.load_image(img_dir + '/images/' + line)
yl = int(img.shape[0] * 0.3)
yr = int(img.shape[0] * 0.7)
xl1 = 0
xr1 = int(img.shape[1] * (1.0 / 3 + 0.1))
xl2 = int(img.shape[1] * (1.0 / 3 - 0.1))
xr2 = int(img.shape[1] * (2.0 / 3 + 0.1))
xl3 = int(img.shape[1] * (2.0 / 3 - 0.1))
xr3 = int(img.shape[1])
t1 = transformer.preprocess('data', img)
t2 = transformer.preprocess('data', img[yl:yr, xl1:xr1])
t3 = transformer.preprocess('data', img[yl:yr, xl2:xr2])
t4 = transformer.preprocess('data', img[yl:yr, xl3:xr3])
Label = []
Conf = []
Xmin = []
Ymin = []
Xmax = []
Ymax = []
cnt1 = 0
cnt2 = 0
cnt3 = 0
cnt4 = 0
for index, t in enumerate([t1, t2, t3, t4]):
net.blobs['data'].data[...] = t
# Forward pass.
detections = net.forward()['detection_out']
# Parse the outputs.
det_label = detections[0,0,:,1]
det_conf = detections[0,0,:,2]
det_xmin = detections[0,0,:,3]
det_ymin = detections[0,0,:,4]
det_xmax = detections[0,0,:,5]
det_ymax = detections[0,0,:,6]
# Get detections with confidence higher than 0.6.
top_indices = [i for i, conf in enumerate(det_conf) if conf >= 0.0]
top_conf = det_conf[top_indices]
top_label_indices = det_label[top_indices].tolist()
top_labels = get_labelname(labelmap, top_label_indices)
top_xmin = det_xmin[top_indices]
top_ymin = det_ymin[top_indices]
top_xmax = det_xmax[top_indices]
top_ymax = det_ymax[top_indices]
#import pdb;
#pdb.set_trace()
for i in xrange(top_conf.shape[0]):
if top_labels[i] != 'Car':
continue
if index == 0:
xmin = (top_xmin[i] * img.shape[1])
ymin = (top_ymin[i] * img.shape[0])
xmax = (top_xmax[i] * img.shape[1])
ymax = (top_ymax[i] * img.shape[0])
if (ymax - ymin) / img.shape[0] <= 0.2:
continue
cnt1 = cnt1 + 1
Label.append(top_labels[i])
Conf.append(top_conf[i])
Xmin.append(xmin)
Ymin.append(ymin)
Xmax.append(xmax)
Ymax.append(ymax)
elif index == 1:
xmin = (top_xmin[i] * (xr1 - xl1))
ymin = (top_ymin[i] * (yr - yl)) + yl
xmax = (top_xmax[i] * (xr1 - xl1))
ymax = (top_ymax[i] * (yr - yl)) + yl
if (ymax - ymin) / img.shape[0] >= 0.25:
continue
cnt2 = cnt2 + 1
Label.append(top_labels[i])
Conf.append(top_conf[i])
Xmin.append(xmin)
Ymin.append(ymin)
Xmax.append(xmax)
Ymax.append(ymax)
elif index == 2:
xmin = (top_xmin[i] * (xr2 - xl2)) + xl2
ymin = (top_ymin[i] * (yr - yl)) + yl
xmax = (top_xmax[i] * (xr2 - xl2)) + xl2
ymax = (top_ymax[i] * (yr - yl)) + yl
if (ymax - ymin) / img.shape[0] >= 0.25:
continue
cnt3 = cnt3 + 1
Label.append(top_labels[i])
Conf.append(top_conf[i])
Xmin.append(xmin)
Ymin.append(ymin)
Xmax.append(xmax)
Ymax.append(ymax)
elif index == 3:
xmin = (top_xmin[i] * (xr3 - xl3)) + xl3
ymin = (top_ymin[i] * (yr - yl)) + yl
xmax = (top_xmax[i] * (xr3 - xl3)) + xl3
ymax = (top_ymax[i] * (yr - yl)) + yl
if (ymax - ymin) / img.shape[0] >= 0.25:
continue
cnt4 = cnt4 + 1
Label.append(top_labels[i])
Conf.append(top_conf[i])
Xmin.append(xmin)
Ymin.append(ymin)
Xmax.append(xmax)
Ymax.append(ymax)
overlapThresh = 0.3
ids = non_max_suppression_fast(np.array(Xmin), np.array(Ymin), np.array(Xmax), np.array(Ymax), np.array(Conf), overlapThresh)
for i in ids:
fout.write(line + ' ')
fout.write('%s %f %f %f %f %f' % (Label[i], Conf[i], Xmin[i], Ymin[i], Xmax[i], Ymax[i]))
fout.write('\n')
#print cnt1,cnt2,cnt3,cnt4
#print len(ids)
#print ids
#exit()
fout.close()
| [
"xiaoyu1qh1@163.com"
] | xiaoyu1qh1@163.com |
ab4e25415c076d9954b016d8298ebe6fcfdaeff6 | 0a1a3aa6d923d03f1bbfe51b6391c8f6c1f05948 | /Leetcode_env/2020_02_22/Convert Binary Number in a Linked List to Integer.py | d6e3a56f9d6636ccd04f8e73a8de7a8df52e93ec | [] | no_license | sam1208318697/Leetcode | 65256783c315da0b5cb70034f7f8a83be159d6b4 | 68cb93a86c11be31dc272a4553dd36283b6a5ff7 | refs/heads/master | 2020-06-06T05:24:39.163778 | 2020-03-01T09:18:36 | 2020-03-01T09:18:36 | 192,649,254 | 5 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,103 | py | # 1290.二进制链表转整数
#
# 给你一个单链表的引用结点head。链表中每个结点的值不是0就是1。已知此链表是一个整数数字的二进制表示形式。
# 请你返回该链表所表示数字的十进制值 。
#
# 示例1:
# 输入:head = [1, 0, 1]
# 输出:5
# 解释:
# 二进制数(101)转化为十进制数(5)
#
# 示例2:
# 输入:head = [0]
# 输出:0
#
# 示例3:
# 输入:head = [1]
# 输出:1
#
# 示例4:
# 输入:head = [1, 0, 0, 1, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0]
# 输出:18880
#
# 示例5:
# 输入:head = [0, 0]
# 输出:0
#
# 提示:
# 链表不为空。
# 链表的结点总数不超过30。
# 每个结点的值不是0就是1。
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def getDecimalValue(self, head: ListNode) -> int:
res = ""
while head.next != None:
res = res + str(head.val)
head = head.next
res = "0b" + res + str(head.val)
return int(res,2)
| [
"1208318697@qq.com"
] | 1208318697@qq.com |
2792bb701213df5461440277bedb477d20d34073 | 5565bf4b8b06452934be9080be6d19c4b3682ae8 | /函数等-进阶/func_test.py | 368615a41fc9580927bac58dba7acdbfa316dd5a | [] | no_license | congpq/Python-Stack | 52ca8d1319ba2c0823a581fd7b07bd92fc16c667 | a7211a3805810a255d7f3750715f2d1596fd65e8 | refs/heads/master | 2020-12-30T12:45:26.471979 | 2017-06-08T15:03:31 | 2017-06-08T15:03:31 | 91,359,651 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 422 | py | #!/usr/bin/env python
# -*- coding:utf-8 -*-
import time
def logger():
time_format = '%Y-%m-%d %X'
time_current = time.strftime(time_format)
with open('a.text', 'a+') as f:
f.write('%s end action\n' %time_current)
def test1():
print('in the test1')
logger()
def test2():
print('in the test2')
logger()
def test3():
    """Print the test3 marker, then record completion via logger()."""
    banner = 'in the test3'
    print(banner)
    logger()
# Invoke the demo functions in order; each prints its marker and appends
# one timestamped line to a.text via logger().
test1()
test2()
test3() | [
"congpq@yeah.net"
] | congpq@yeah.net |
6eb0d84530b500e74e8e9edde1228aadfe50f8ea | 8966d83bf85d4738d644624bd7b7063e8534a515 | /data.d/code/python/example/wxpython/frame_boxsizer_horizontal.py | cb90c4dd5a0b24d8d1c6b59f67b455c564814a00 | [] | no_license | taka16a23/.emacs.d | 84a77c04c4d5e00c089cb01cc42a94b884f729ae | ac5794e2594037e316d5fe9cf6bf1fd20b44a726 | refs/heads/master | 2023-05-29T06:25:38.449977 | 2023-05-16T22:08:04 | 2023-05-16T22:08:04 | 82,106,233 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 676 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from time import sleep
import wx
def _main():
    """Show a frame whose grey panel lays out three buttons horizontally."""
    app = wx.App()
    frame = wx.Frame(None, wx.ID_ANY, 'test Frameme', size=(400, 200))
    panel = wx.Panel(frame, wx.ID_ANY)
    panel.SetBackgroundColour('#AFAFAF')
    # Create the buttons in order; the sizer places them left to right.
    buttons = [wx.Button(panel, wx.ID_ANY, label)
               for label in ('botton1', 'botton2', 'botton3')]
    sizer = wx.BoxSizer(wx.HORIZONTAL)
    for button in buttons:
        sizer.Add(button)
    panel.SetSizer(sizer)
    frame.Show()
    app.MainLoop()
# Start the GUI demo only when this file is executed directly (not imported).
if __name__ == '__main__':
    _main()
| [
"root@qu"
] | root@qu |
c0609462eb10d5ff1b7977890cfb89e92f82bb45 | 6e7d73d1ef64f93f7537ed0c4724de4bde12a3df | /SpyderFiles/heaviside.py | 83b41c0c49b47828aac7e91bc1d77a1229e355e6 | [] | no_license | AndreaCano/machineLearning | 30cc91a5049e25406a1e36a18687474e4334b1e0 | f1a95f82353f02b94fb629a560ffbff3927f393e | refs/heads/master | 2020-03-30T13:58:54.232300 | 2018-10-28T22:31:38 | 2018-10-28T22:31:38 | 151,295,839 | 0 | 0 | null | 2018-10-03T23:37:46 | 2018-10-02T17:33:38 | Jupyter Notebook | UTF-8 | Python | false | false | 352 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 14:37:50 2018
@author: Andrea
"""
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn import datasets
# 0 if x < 0
#heaviside(x, h0) = h0 if x == 0
# 1 if x > 0
# Evaluate at a negative, zero, and positive input -> array([0. , 0.5, 1. ])
# per the step definition above (h0 = 0.5 at x == 0).
# NOTE(review): the result is neither printed nor assigned, so this line
# has no visible effect when run as a script.
np.heaviside([-1.5, 0, 2.0], .5)
| [
"andiecano@gmail.com"
] | andiecano@gmail.com |
f79a1867e21ee14e195dd10b6e0687e32ca3254c | cf2fbcb7a7687aabe4f1a1e0f7cd30902508e0ba | /sortedSecondlargestsecondsmallest.py | 5c70375e64eee18860ed13919bc62cc1e85ceb97 | [] | no_license | rasik-hasan/Problem-solving-with-python | 7550d7badef6165de6d6bc87a2c6a1ba689542da | 2585b70829e1d341b1bac00817d933c6aa90d770 | refs/heads/master | 2021-10-27T05:11:31.526875 | 2019-04-16T04:09:48 | 2019-04-16T04:09:48 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 406 | py | # -*- coding: utf-8 -*-
"""
Created on Sat Apr 13 10:55:22 2019
@author: rasik
"""
def secondLargestandSecondsmallest(arr):
    """Return the (second smallest, second largest) elements of ``arr``.

    Elements are taken by sorted position, so duplicates are not
    collapsed: [5, 5, 1] -> (5, 5).

    Raises:
        ValueError: if ``arr`` has fewer than two elements (the original
            raised a bare IndexError in that case).
    """
    if len(arr) < 2:
        raise ValueError("need at least two elements")
    ordered = sorted(arr)
    # ordered[1] is the second smallest; ordered[-2] the second largest.
    return ordered[1], ordered[-2]
# Demo: compute the second-smallest/second-largest of a sample list.
arr = [9,2,3,4,5,6,7,8,1]
small, large = secondLargestandSecondsmallest(arr)
print("second largest", large, "second smallest" , small) | [
"rasik.hasan@yahoo.com"
] | rasik.hasan@yahoo.com |
0ebc71d400a7c12c017e65617a5875589ed83454 | e4d0daf192f7967d6142b4486680cf4c7b8b4e32 | /patient/migrations/0018_auto__add_field_immunizationhistory_others_injection.py | db87b7a7c1a82593436bee2a12752d4b50a46022 | [] | no_license | aazhbd/medical_info01 | 0d623454a74b334020f2ccda48064cf229c732c4 | b08961089e6a7cdc567f879ab794e45067171418 | refs/heads/master | 2021-01-10T14:49:19.057064 | 2015-10-15T17:08:22 | 2015-10-15T17:08:22 | 44,332,500 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 29,812 | py | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: add the blank TextField ``others_injection``."""
        # Adding field 'ImmunizationHistory.others_injection'
        # default='' backfills existing rows; keep_default=False means the
        # default is used only for this migration, not kept on the column.
        db.add_column(u'patient_immunizationhistory', 'others_injection',
                      self.gf('django.db.models.fields.TextField')(default='', blank=True),
                      keep_default=False)
    def backwards(self, orm):
        """Unapply: drop ``others_injection`` (its data is lost on rollback)."""
        # Deleting field 'ImmunizationHistory.others_injection'
        db.delete_column(u'patient_immunizationhistory', 'others_injection')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.additionalpatientinformation': {
'Meta': {'object_name': 'AdditionalPatientInformation'},
'alcohol': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cigarettes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'cooking_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'literate': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'occupation': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'other_harmful_substances': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'psychological_stress': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'toilet_facilities': ('django.db.models.fields.CharField', [], {'max_length': '20'})
},
u'patient.familymedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'FamilyMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.guardian': {
'Meta': {'object_name': 'Guardian'},
'contact_number': ('django.db.models.fields.CharField', [], {'max_length': '15'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'educational_level': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'home_address': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'job': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'relation': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.gynaecologicalhistory': {
'Meta': {'object_name': 'GynaecologicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_last_pap_smear': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'method_of_birth_control': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'previous_surgery': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousSurgery']"}),
'result_pap_smear': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.immunizationhistory': {
'Meta': {'object_name': 'ImmunizationHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_injection': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'tetanus_toxoid1': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid2': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'tetanus_toxoid3': ('django.db.models.fields.DateTimeField', [], {'null': 'True'}),
'vaccination': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.laboratorytest': {
'Meta': {'object_name': 'LaboratoryTest'},
'blood_group': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'hemoglobin': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'serological_test_for_syphilis': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'urinalysis': ('django.db.models.fields.CharField', [], {'max_length': '2'})
},
u'patient.medicalhistory': {
'Meta': {'object_name': 'MedicalHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'family_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.FamilyMedicalHistory']"}),
'gynaecological_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.GynaecologicalHistory']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'immunization_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ImmunizationHistory']"}),
'menstrual_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.MenstrualHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.ObstetricHistory']"}),
'past_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PastMedicalHistory']"}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'present_medical_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PresentMedicalHistory']"})
},
u'patient.menstrualhistory': {
'Meta': {'object_name': 'MenstrualHistory'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'day_of_visit': ('django.db.models.fields.DateField', [], {}),
'expected_date_of_delivery': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_menstrual_periods': ('django.db.models.fields.DateField', [], {}),
'menstrual_cycle': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'poa_by_lmp': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'patient.obstetrichistory': {
'Meta': {'object_name': 'ObstetricHistory'},
'check_if_you_have_been_miscarriages': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'check_if_you_have_been_pregnant': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'list_previous_obstetric_history': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PreviousObstetricHistory']"}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.pastmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PastMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.patientinformation': {
'Meta': {'object_name': 'PatientInformation'},
'address': ('django.db.models.fields.TextField', [], {}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'marital_status': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'operator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'telephone_number': ('django.db.models.fields.CharField', [], {'max_length': '15'})
},
u'patient.prescription': {
'Meta': {'object_name': 'Prescription'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_prescription': ('django.db.models.fields.TextField', [], {}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"})
},
u'patient.presentmedicalhistory': {
'HIV_status_if_known': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'Meta': {'object_name': 'PresentMedicalHistory'},
'chronical_renal_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'diabetes_melitus': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'epilepsy': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'haemorrhage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'heart_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hepatitis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hypertension': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kidney_disease': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'liver_problems': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'malaria': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pelvic_backinjuries': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'rhesus_d_antibodies': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'seizures': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sexually_transmitted_infection': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'sickle_cell_trait': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tuberculosis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'urinary_tract_surgeries': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.previousobstetrichistory': {
'Meta': {'object_name': 'PreviousObstetricHistory'},
'age_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'birth_weight': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'length_of_pregnancy': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_baby': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'obstetrical_operation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'periods_of_exclusive_feeding': ('django.db.models.fields.CharField', [], {'max_length': '30'}),
'problems': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'sex': ('django.db.models.fields.CharField', [], {'max_length': '1'}),
'types_of_delivery': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'year': ('django.db.models.fields.DateField', [], {})
},
u'patient.previoussurgery': {
'Meta': {'object_name': 'PreviousSurgery'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'endometriosis': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'fibrocystic_breasts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'ovarian_cysts': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'uterine_fibroids': ('django.db.models.fields.BooleanField', [], {'default': 'True'})
},
u'patient.report': {
'Meta': {'object_name': 'Report'},
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'diabetis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'hiv': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'pregnancy': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'patient.routinecheckup': {
'Meta': {'object_name': 'Routinecheckup'},
'abdominal_changes': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'blood_pressure': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'chest_and_heart_auscultation': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'fetal_movement': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_of_examiner': ('django.db.models.fields.CharField', [], {'max_length': '50'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'symptom_events': ('django.db.models.fields.CharField', [], {'max_length': '300'}),
'uterine_height': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'vaginal_examination': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'visit': ('django.db.models.fields.CharField', [], {'max_length': '2'}),
'weight': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.signanaemia': {
'Meta': {'object_name': 'Signanaemia'},
'conjunctiva': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'fingernails': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'oral_mucosa': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'others_please_state': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'pale_complexion': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'shortness_of_breath': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'tip_of_tongue': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
u'patient.ultrasoundscanning': {
'AC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'BPD': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'CRL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'FL': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'HC': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'Meta': {'object_name': 'UltrasoundScanning'},
'amount_of_amniotic_fluid': ('django.db.models.fields.IntegerField', [], {'max_length': '10'}),
'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'date': ('django.db.models.fields.DateField', [], {}),
'gestation_age': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'name_examiner': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'patient': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['patient.PatientInformation']"}),
'position_of_the_baby': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'position_of_the_placenta': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'saved_ultrasound_image': ('django.db.models.fields.files.FileField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
}
}
complete_apps = ['patient'] | [
"aazhbd@yahoo.com"
] | aazhbd@yahoo.com |
7f3cbc4e65e2611b1d11249e6d5a816518e98a10 | 1e0203f40d4cffed0d64449edeaea00311f4b732 | /target-sum/solution.py | 6cc114fe33f51522bfd62d35efa0561c3ea2be9c | [] | no_license | childe/leetcode | 102e87dd8d918877f64e7157d45f3f45a607b9e4 | d2e8b2dca40fc955045eb62e576c776bad8ee5f1 | refs/heads/master | 2023-01-12T01:55:26.190208 | 2022-12-27T13:25:27 | 2022-12-27T13:25:27 | 39,767,776 | 2 | 1 | null | 2020-10-13T01:29:05 | 2015-07-27T10:05:04 | Python | UTF-8 | Python | false | false | 603 | py | class Solution(object):
def findTargetSumWays(self, nums, S):
"""
:type nums: List[int]
:type S: int
:rtype: int
"""
if sum(nums) < S:
return 0
s = sum(nums) - S
if s % 2 == 1:
return 0
P = int(s / 2)
dp = [1] + [0] * P
for n in nums:
for i in range(len(dp)-1, -1, -1):
if i + n <= P:
dp[i+n] += dp[i]
return dp[P]
def main():
    """Demo: print the number of sign assignments of [1, 1] summing to 0."""
    solver = Solution()
    print(solver.findTargetSumWays([1, 1], 0))
# Run the demo only when executed as a script (not on import).
if __name__ == '__main__':
    main()
| [
"rmself@qq.com"
] | rmself@qq.com |
48c35feb428e6cebfbe9baf763e7da4d47cae069 | dedac600d4f9a89f426e34cf93dd8385233527a5 | /robot.py | 42f6995b20090f2157f013e4ec87f8965d16f862 | [] | no_license | dmarcelinobr/autonomous-trader | 59d6f6b9420202cab90332547081dafb8c85bca2 | 9919dd7d4b72ba60bd15b9c51789dffa6865f6d6 | refs/heads/main | 2023-03-24T14:50:05.805654 | 2021-03-17T13:09:09 | 2021-03-17T13:09:09 | 344,959,786 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,896 | py | ### Importando as bibliotecas.
import pandas as pd ## manipulação de dataframes em python
import time ## manipulação de tempo
import numpy as np ## manipulação de arrays e vetores
import talib as ta ## criação de indicadores técnicos
from datetime import datetime ## manipulação de datas em pyton
import datetime as dt ## manipulação de datas em python
from buy_function import buy ## função de compra de ativos
from sell_function import sell ## função de vendas de ativos
from download_data import download_data ## função para download dos dados em tempo real
from config_param import config_param ## função de configuração da estratégia de compra/venda
import warnings ## filtros para avisos
warnings.filterwarnings('ignore') ## ignorar aviso
import MetaTrader5 as mt5 ## biblioteca do MT5 para Python
import pytz ## manipulação de time zones em python
##A) Iniciando uma sessão do MT5 com um looping
RUN=1
while RUN==1:
# Estabelecendo uma conexão com o Terminal do MetaTrader5
if not mt5.initialize():
print("initialize() failed, error code =",mt5.last_error())
quit()
## Definindo o ativo usado no robô
Ativo='CCMH21'
## Ajustando a quantidade de lotes que serão comprados/vendidos
lot= 1
    ## Setting the timeframe (M1 = 1 minute / M5 = 5 minutes / D1 = daily). NOTE(review): the line below uses mt5.TIMEFRAME_D, which does not appear to be a MetaTrader5 constant — likely should be mt5.TIMEFRAME_D1; confirm before running.
timeframe = mt5.TIMEFRAME_D
## Carregando as cotações em tempo real através da função download_data
xfh = download_data(Ativo,timeframe) ## Parâmetros: Ativo e o timeframe configurado
#Criando um novo objeto que recebe o dataframe com os dados em tempo real
stocks = xfh.copy()
# PARTE IV - CRIANDO A ESTRATÉGIA
#ETAPA II) Suavização da Série
# a) Suavização da série
suavização = 5
# b) Gerando as features OHLC suavizadas
stocks['EMAC'] = ta.EMA(stocks['Adj Close'], timeperiod=suavização) # Suavização da série de fechamento
stocks['EMAO'] = ta.EMA(stocks['Open'], timeperiod=suavização) # Suavização da série de abertura
stocks['EMAH'] = ta.EMA(stocks['High'], timeperiod=suavização) # Suavização da série de Altas
stocks['EMAL'] = ta.EMA(stocks['Low'], timeperiod=suavização) # Suavização da série de Baixas
stocks['EMAV'] = ta.EMA(stocks['Volume'], timeperiod=suavização) # Suavização da série de Volume
#ETAPA III)
##-- Gerando os Osciladores e Indicadores de Tendência
# 1) RSI - Relative Strength Index
stocks['RSI'] = ta.RSI(stocks['EMAC'], timeperiod=14)
# 2) MACD - Moving Average Convergence/Divergence
stocks['macd'], stocks['macdsignal'], stocks['macdhist'] = ta.MACD(stocks['EMAC'], fastperiod=12, slowperiod=26, signalperiod=9)
# 3) Parabolic SAR
stocks['SAR'] = ta.SAR(stocks['EMAH'], stocks['EMAL'], 0.02, 0.3)
stocks['SAREXT'] = ta.SAREXT(stocks['EMAH'], stocks['EMAL'], 0.02, 0.3)
# 4) CCI - Commodity Channel Index
stocks['CCI'] = ta.CCI(stocks['EMAH'], stocks['EMAL'], stocks['EMAC'], timeperiod=14)
# 5) SMA - Single Moving Average
sht = 5
lng = 22
stocks['SHT'] = stocks['Adj Close'].rolling(window=sht).mean()
stocks['LNG'] = stocks['Adj Close'].rolling(window=lng).mean()
# 6) Bollinger Bands
stocks['UPP'], stocks['MIDD'], stocks['LOW'] = ta.BBANDS(stocks['EMAC'], timeperiod=6, nbdevup=4, nbdevdn=4, matype=0)
# 7) Top & Bottom
stocks['Close20d'] = stocks['Adj Close'].shift(20)
stocks['Close30d'] = stocks['Adj Close'].shift(30)
stocks['Close40d'] = stocks['Adj Close'].shift(40)
stocks['Close50d'] = stocks['Adj Close'].shift(50)
stocks['Close60d'] = stocks['Adj Close'].shift(60)
# 8) TOP & BOTTOM
Lenght = 60
stocks['MIN_' + str(Lenght)] = list(np.zeros(len(stocks)))
stocks['MAX_' + str(Lenght)] = list(np.zeros(len(stocks)))
for i in range(len(stocks) - Lenght):
stocks['MIN_' + str(Lenght)][i + Lenght] = stocks['Adj Close'][i:i + Lenght].min()
stocks['MAX_' + str(Lenght)][i + Lenght] = stocks['Adj Close'][i:i + Lenght].max()
stocks.dropna(axis=0, inplace=True)
#===================================================================================================================
# A ESTRATÉGIA DE COMPRA/VENDA DEVE SER INSERIDA NO BLOCO ABAIXO
#===================================================================================================================
stocks['Status'] = stocks['SHT'] > stocks['LNG']
# ==================================================================================================================
# ==================================================================================================================
# Executando a função 'config_param' e passando a série
stocks=config_param(stocks, sht, lng, Lenght)
## Criando variáveis com os últimos resultados os indicadores estratégicos
Var= stocks['action'].tail(1).values
Var=Var[0]
#Var7 = stocks['has_action'].tail(1).values
#Var7 = Var7[0]
Var1 = stocks['Adj Close'].tail(1).values
Var1 = Var1[0]
Var2 = stocks['MIN_' + str(Lenght)].tail(1).values
Var2 = Var2[0]
Var3 = stocks['MAX_' + str(Lenght)].tail(1).values
Var3 = Var3[0]
#Var4 = stocks['UPP'].tail(1).values
#Var4 = Var4[0]
#Var5 = stocks['LOW'].tail(1).values
#Var5 = Var5[0]
#Ordens de Compra
if Var=='buy':
result,price= buy(Ativo,lot)
#print(result)
import time
time.sleep(60)
#Ordens de Venda
if Var=='sell':
result,price= sell(Ativo,lot)
#print(result)
import time
time.sleep(60)
#Imprimindo os valores de cada indicador
if Var1 < Var2:
print(f'Entry Point - Buy //// Price {Var1:.0f}')
print(f'Price {Var1:.0f} < {Var2:.0f} (Mínimo {Lenght} dias)')
elif Var1 > Var3:
print(f'Entry Point - Sell //// Price {Var3:.2f}')
print(f'Price {Var1:.0f} > {Var3:.0f} (Máximo {Lenght} dias)')
else:
print('No Entry Point --- :( ')
print(f'Mínimo - {Var2:.0f} < Price {Var1:.0f} < Máximo {Var3:.0f}')
'''
if Var1 > Var4:
print('Price: ', Var1, '>', 'UPP: ', Var4, ' & ', 'CCI > 100 ', Var5)
elif Var1 < Var5:
print('Price: ', Var1, '<', 'LOW: ', Var4, ' & ', 'CCI < -100 ', Var5)
else:
print('No entry point {::} :(')
'''
#import time
#time.sleep(60) # Sleep for 1 seconds
| [
"dmarcelino@live.com"
] | dmarcelino@live.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.